diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..d9709c28affa43d035d2c32ae46291a9143f927d
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,44 @@
+# How to contribute to `dora-rs`
+
+We welcome bug reports, feature requests, and pull requests!
+
+Please discuss non-trivial changes in a GitHub issue or on Discord before implementing them.
+This way, we can avoid unnecessary work on both sides.
+
+## Building
+
+The `dora` project is set up as a [cargo workspace](https://doc.rust-lang.org/cargo/reference/workspaces.html).
+You can use the standard `cargo check`, `cargo build`, `cargo run`, and `cargo test` commands.
+To run a command for a specific package only, pass e.g. `--package dora-daemon`.
+To run a command for the whole workspace, pass `--workspace`.
+
+## Continuous Integration (CI)
+
+We're using [GitHub Actions](https://github.com/features/actions) to run automated checks on all commits and pull requests.
+These checks ensure that our `main` branch always builds successfully and that it passes all tests.
+Please ensure that your pull request passes all checks.
+You don't need to fix warnings that are unrelated to your changes.
+Feel free to ask for help if you're unsure about a check failure.
+
+We currently run the following kinds of checks:
+
+- **CI / Test:** Ensures that the project builds and that all unit tests pass. This check is run on Linux, Windows, and macOS.
+- **CI / Examples:** Builds and runs the Rust, C, and C++ dataflows from the `examples` subdirectory. This check is run on Linux, Windows, and macOS.
+- **CI-python / Python Examples:** Builds and runs the Python dataflows from the `examples` subdirectory. This check is run on Linux only.
+- **github pages / deploy:** Generates our website from the `docs` subfolder.
+- **CI / CLI Test:** Runs some basic tests of the `dora` command-line application. This check is run on Linux, Windows, and macOS.
+- **CI / Clippy:** Runs the additional checks of the [`clippy`](https://github.com/rust-lang/rust-clippy) project.
+- **CI / Formatting:** Ensures that the code is formatted using `rustfmt` (see [below](#style)).
+- **CI / License Checks:** Scans the dependency tree and tries to detect possible license incompatibilities.
+
+## Style
+
+We use [`rustfmt`](https://github.com/rust-lang/rustfmt) with its default settings to format our code.
+Please run `cargo fmt --all` on your code before submitting a pull request.
+Our CI will run an automatic formatting check on your code.
+
+## Publishing New Versions
+
+The maintainers are responsible for publishing new versions of the `dora` crates.
+Please don't open unsolicited pull requests to create new releases.
+Instead, request a new version by opening an issue or by leaving a comment on a merged PR.
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000000000000000000000000000000000000..20f9639e1cb7ea48cdd86e8c3c0d83909a17aa16
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,11443 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3 + +[[package]] +name = "ab_glyph" +version = "0.2.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e53b0a3d5760cd2ba9b787ae0c6440ad18ee294ff71b05e3381c900a7d16cfd" +dependencies = [ + "ab_glyph_rasterizer", + "owned_ttf_parser", +] + +[[package]] +name = "ab_glyph_rasterizer" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c71b1793ee61086797f5c80b6efa2b8ffa6d5dd703f118545808a7f2e27f7046" + +[[package]] +name = "accesskit" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74a4b14f3d99c1255dcba8f45621ab1a2e7540a0009652d33989005a4d0bfc6b" +dependencies = [ + "enumn", + "serde", +] + +[[package]] +name = "accesskit_consumer" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c17cca53c09fbd7288667b22a201274b9becaa27f0b91bf52a526db95de45e6" +dependencies = [ + "accesskit", +] + +[[package]] +name = "accesskit_macos" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd3b6ae1eabbfbced10e840fd3fce8a93ae84f174b3e4ba892ab7bcb42e477a7" +dependencies = [ + "accesskit", + "accesskit_consumer", + "objc2 0.3.0-beta.3.patch-leaks.3", + "once_cell", +] + +[[package]] +name = "accesskit_unix" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f46c18d99ba61ad7123dd13eeb0c104436ab6af1df6a1cd8c11054ed394a08" +dependencies = [ + "accesskit", + "accesskit_consumer", + "async-channel 2.3.1", + "async-once-cell", + "atspi", + "futures-lite 1.13.0", + "once_cell", + "serde", + "zbus", +] + +[[package]] +name = "accesskit_windows" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afcae27ec0974fc7c3b0b318783be89fd1b2e66dd702179fe600166a38ff4a0b" +dependencies = [ + "accesskit", + "accesskit_consumer", + "once_cell", + "paste", + "static_assertions", + "windows 0.48.0", +] + +[[package]] +name = "accesskit_winit" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5284218aca17d9e150164428a0ebc7b955f70e3a9a78b4c20894513aabf98a67" +dependencies = [ + "accesskit", + "accesskit_macos", + "accesskit_unix", + "accesskit_windows", + "winit", +] + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "aes" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" +dependencies = [ + "cfg-if 1.0.0", + "cipher", + "cpufeatures", +] + +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if 1.0.0", + "const-random", + "getrandom", + "once_cell", + "serde", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" 
+dependencies = [ + "memchr", +] + +[[package]] +name = "aligned-vec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4aa90d7ce82d4be67b64039a3d588d38dbcc6736577de4a847025ce5b0c468d1" +dependencies = [ + "serde", +] + +[[package]] +name = "alloc-no-stdlib" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" +dependencies = [ + "alloc-no-stdlib", +] + +[[package]] +name = "allocator-api2" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" + +[[package]] +name = "android-activity" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee91c0c2905bae44f84bfa4e044536541df26b7703fd0888deeb9060fcc44289" +dependencies = [ + "android-properties", + "bitflags 2.5.0", + "cc", + "cesu8", + "jni", + "jni-sys", + "libc", + "log", + "ndk", + "ndk-context", + "ndk-sys", + "num_enum", + "thiserror", +] + +[[package]] +name = "android-properties" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7eb209b1518d6bb87b283c20095f5228ecda460da70b44f0802523dea6da04" + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "ansi_colours" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a1558bd2075d341b9ca698ec8eb6fcc55a746b1fc4255585aad5b141d918a80" +dependencies = [ + "rgb", +] + +[[package]] +name = "anstream" +version = "0.6.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" + +[[package]] +name = "anstyle-parse" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +dependencies = [ + "anstyle", + "windows-sys 0.52.0", +] + +[[package]] +name = "anyhow" +version = 
"1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" + +[[package]] +name = "arboard" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb4009533e8ff8f1450a5bcbc30f4242a1d34442221f72314bea1f5dc9c7f89" +dependencies = [ + "clipboard-win", + "core-graphics", + "image 0.25.1", + "log", + "objc2 0.5.2", + "objc2-app-kit", + "objc2-foundation", + "parking_lot", + "windows-sys 0.48.0", + "x11rb", +] + +[[package]] +name = "array-init" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d62b7694a562cdf5a74227903507c56ab2cc8bdd1f781ed5cb4cf9c9f810bfc" + +[[package]] +name = "array-init-cursor" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7d0a018de4f6aa429b9d33d69edf69072b1c5b1cb8d3e4a5f7ef898fc3eb76" + +[[package]] +name = "arrayvec" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + +[[package]] +name = "arrow" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ae9728f104939be6d8d9b368a354b4929b0569160ea1641f0721b55a861ce38" +dependencies = [ + "arrow-arith", + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-csv", + "arrow-data", + "arrow-ipc", + "arrow-json", + "arrow-ord", + "arrow-row", + "arrow-schema", + "arrow-select", + "arrow-string", + "pyo3", +] + +[[package]] +name = "arrow-arith" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7029a5b3efbeafbf4a12d12dc16b8f9e9bff20a410b8c25c5d28acc089e1043" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "chrono", + "half", + "num", +] + +[[package]] +name = "arrow-array" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d33238427c60271710695f17742f45b1a5dc5bcfc5c15331c25ddfe7abf70d97" +dependencies = [ + "ahash", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "chrono", + "half", + "hashbrown 0.14.5", + "num", +] + +[[package]] +name = "arrow-buffer" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe9b95e825ae838efaf77e366c00d3fc8cca78134c9db497d6bda425f2e7b7c1" +dependencies = [ + "bytes", + "half", + "num", +] + +[[package]] +name = "arrow-cast" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cf8385a9d5b5fcde771661dd07652b79b9139fea66193eda6a88664400ccab" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "atoi", + "base64 0.22.1", + "chrono", + "half", + "lexical-core", + "num", + "ryu", +] + +[[package]] +name = "arrow-csv" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cea5068bef430a86690059665e40034625ec323ffa4dd21972048eebb0127adc" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-schema", + "chrono", + "csv", + "csv-core", + "lazy_static", + "lexical-core", + "regex", +] + +[[package]] +name = "arrow-data" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb29be98f987bcf217b070512bb7afba2f65180858bca462edf4a39d84a23e10" +dependencies = [ + "arrow-buffer", + "arrow-schema", + 
"half", + "num", +] + +[[package]] +name = "arrow-format" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07884ea216994cdc32a2d5f8274a8bee979cfe90274b83f86f440866ee3132c7" +dependencies = [ + "planus", + "serde", +] + +[[package]] +name = "arrow-ipc" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffc68f6523970aa6f7ce1dc9a33a7d9284cfb9af77d4ad3e617dbe5d79cc6ec8" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-schema", + "flatbuffers 24.3.25", +] + +[[package]] +name = "arrow-json" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2041380f94bd6437ab648e6c2085a045e45a0c44f91a1b9a4fe3fed3d379bfb1" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-schema", + "chrono", + "half", + "indexmap 2.2.6", + "lexical-core", + "num", + "serde", + "serde_json", +] + +[[package]] +name = "arrow-ord" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcb56ed1547004e12203652f12fe12e824161ff9d1e5cf2a7dc4ff02ba94f413" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "half", + "num", +] + +[[package]] +name = "arrow-row" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "575b42f1fc588f2da6977b94a5ca565459f5ab07b60545e17243fb9a7ed6d43e" +dependencies = [ + "ahash", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "half", + "hashbrown 0.14.5", +] + +[[package]] +name = "arrow-schema" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32aae6a60458a2389c0da89c9de0b7932427776127da1a738e2efc21d32f3393" +dependencies = [ + "bitflags 2.5.0", + "serde", +] + +[[package]] +name = "arrow-select" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de36abaef8767b4220d7b4a8c2fe5ffc78b47db81b03d77e2136091c3ba39102" +dependencies = [ + "ahash", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "num", +] + +[[package]] +name = "arrow-string" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e435ada8409bcafc910bc3e0077f532a4daa20e99060a496685c0e3e53cc2597" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "memchr", + "num", + "regex", + "regex-syntax 0.8.3", +] + +[[package]] +name = "as-raw-xcb-connection" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "175571dd1d178ced59193a6fc02dde1b972eb0bc56c892cde9beeceac5bf0f6b" + +[[package]] +name = "ash" +version = "0.37.3+1.3.251" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39e9c3835d686b0a6084ab4234fcd1b07dbf6e4767dce60874b12356a25ecd4a" +dependencies = [ + "libloading 0.7.4", +] + +[[package]] +name = "ashpd" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ac22eda5891cc086690cb6fa10121c0390de0e3b04eb269f2d766b00d3f2d81" +dependencies = [ + "async-fs 2.1.2", + "async-net", + "enumflags2", + "futures-channel", + "futures-util", + "once_cell", + "rand", + "serde", + "serde_repr", + "url", + "zbus", +] + +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "async-broadcast" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c48ccdbf6ca6b121e0f586cbc0e73ae440e56c67c30fa0873b4e110d9c26d2b" +dependencies = [ + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy 0.5.2", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-executor" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b10202063978b3351199d68f8b22c4e47e4b1b822f8d43fd862d5ea8c006b29a" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand 2.1.0", + "futures-lite 2.3.0", + "slab", +] + +[[package]] +name = "async-fs" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "blocking", + "futures-lite 1.13.0", +] + +[[package]] +name = "async-fs" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" +dependencies = [ + "async-lock 3.3.0", + "blocking", + "futures-lite 2.3.0", +] + +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.3.1", + "async-executor", + "async-io 2.3.2", + "async-lock 3.3.0", + "blocking", + "futures-lite 2.3.0", + "once_cell", + "tokio", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if 1.0.0", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.27", + "slab", + "socket2 0.4.10", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884" +dependencies = [ + "async-lock 3.3.0", + "cfg-if 1.0.0", + "concurrent-queue", + "futures-io", + "futures-lite 2.3.0", + "parking", + "polling 3.7.0", + "rustix 0.38.34", + "slab", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +dependencies = [ + "event-listener 4.0.3", + "event-listener-strategy 0.4.0", + "pin-project-lite", +] + +[[package]] +name = "async-net" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" +dependencies = [ + "async-io 2.3.2", + "blocking", + "futures-lite 2.3.0", +] + +[[package]] +name = "async-once-cell" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9338790e78aa95a416786ec8389546c4b6a1dfc3dc36071ed9518a9413a542eb" + +[[package]] +name = "async-process" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea6438ba0a08d81529c69b36700fa2f95837bfe3e776ab39cde9c14d9149da88" +dependencies = [ + "async-io 1.13.0", + "async-lock 2.8.0", + "async-signal", + "blocking", + "cfg-if 1.0.0", + "event-listener 3.1.0", + "futures-lite 1.13.0", + "rustix 0.38.34", + "windows-sys 0.48.0", +] + +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "async-rustls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93b21a03b7c21702a0110f9f8d228763a533570deb376119042dabf33c37a01a" +dependencies = [ + "futures-io", + "rustls 0.20.9", + "webpki", +] + +[[package]] +name = "async-signal" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afe66191c335039c7bb78f99dc7520b0cbb166b3a1cb33a03f53d8a1c6f2afda" +dependencies = [ + "async-io 2.3.2", + "async-lock 3.3.0", + "atomic-waker", + "cfg-if 1.0.0", + "futures-core", + "futures-io", + "rustix 0.38.34", + "signal-hook-registry", + "slab", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-attributes", + "async-channel 1.9.0", + "async-global-executor", + "async-io 1.13.0", + "async-lock 2.8.0", + "async-process", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite 1.13.0", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + +[[package]] +name = "async-stream" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + +[[package]] +name = "async-trait" +version = "0.1.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + +[[package]] +name = "atomic" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "atspi" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6059f350ab6f593ea00727b334265c4dfc7fd442ee32d264794bd9bdc68e87ca" +dependencies = [ + "atspi-common", + "atspi-connection", + "atspi-proxies", +] + +[[package]] +name = "atspi-common" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92af95f966d2431f962bc632c2e68eda7777330158bf640c4af4249349b2cdf5" +dependencies = [ + "enumflags2", + "serde", + "static_assertions", + "zbus", + "zbus_names", + "zvariant", +] + +[[package]] +name = "atspi-connection" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c65e7d70f86d4c0e3b2d585d9bf3f979f0b19d635a336725a88d279f76b939" +dependencies = [ + "atspi-common", + "atspi-proxies", + "futures-lite 1.13.0", + "zbus", +] + +[[package]] +name = "atspi-proxies" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6495661273703e7a229356dcbe8c8f38223d697aacfaf0e13590a9ac9977bb52" +dependencies = [ + "atspi-common", + "serde", + "zbus", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi 0.1.19", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "autocfg" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" + +[[package]] +name = "axum" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +dependencies = [ + "async-trait", + "axum-core", + "bitflags 1.3.2", + "bytes", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.28", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + +[[package]] +name = "az" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973" + +[[package]] +name = "backtrace" +version = "0.3.71" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +dependencies = [ + "addr2line", + "cc", + "cfg-if 1.0.0", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bat" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dcc9e5637c2330d8eb7b920f2aa5d9e184446c258466f825ea1412c7614cc86" +dependencies = [ + "ansi_colours", + "bincode", + "bugreport", + "bytesize", + "clap 4.5.6", + "clircle", + "console", + "content_inspector", + "encoding_rs", + "etcetera", + "flate2", + "git2", + "globset", + "grep-cli", + "home", + "nu-ansi-term 0.49.0", + "once_cell", + "path_abs", + "plist", + "regex", + "semver", + "serde", + "serde_yaml 0.9.34+deprecated", + "shell-words", + "syntect", + "thiserror", + "unicode-width", + "walkdir", + "wild", +] + +[[package]] +name = "benchmark-example-node" +version = "0.3.4" +dependencies = [ + "dora-node-api", + "eyre", + "futures", + "rand", + "tokio", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "benchmark-example-sink" +version = "0.3.4" +dependencies = [ + "dora-node-api", + "eyre", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +dependencies = [ + "bytemuck", + "serde", +] + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "block" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "block-sys" +version = "0.1.0-beta.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa55741ee90902547802152aaf3f8e5248aab7e21468089560d4c8840561146" +dependencies = [ + "objc-sys 0.2.0-beta.2", +] + +[[package]] +name = "block-sys" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae85a0696e7ea3b835a453750bf002770776609115e6d25c6d2ff28a8200f7e7" +dependencies = [ + "objc-sys 0.3.5", +] + +[[package]] +name = "block2" +version = "0.2.0-alpha.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8dd9e63c1744f755c2f60332b88de39d341e5e86239014ad839bd71c106dec42" +dependencies = [ + "block-sys 0.1.0-beta.1", + "objc2-encode 2.0.0-pre.2", +] + +[[package]] +name = "block2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15b55663a85f33501257357e6421bb33e769d5c9ffb5ba0921c975a123e35e68" +dependencies = [ + "block-sys 0.2.1", + "objc2 0.4.1", +] + +[[package]] +name = "block2" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c132eebf10f5cad5289222520a4a058514204aed6d791f1cf4fe8088b82d15f" +dependencies = [ + "objc2 0.5.2", +] + +[[package]] +name = "blocking" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "495f7104e962b7356f0aeb34247aca1fe7d2e783b346582db7f2904cb5717e88" +dependencies = [ + "async-channel 2.3.1", + "async-lock 3.3.0", + "async-task", + "futures-io", + "futures-lite 2.3.0", + "piper", +] + +[[package]] +name = "brotli" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "4.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + +[[package]] +name = "bstr" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +dependencies = [ + "lazy_static", + "memchr", + "regex-automata 0.1.10", +] + +[[package]] +name = "bstr" +version = "1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" +dependencies = [ + "memchr", + "regex-automata 0.4.6", + "serde", +] + +[[package]] +name = "bugreport" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "535120b8182547808081a66f1f77a64533c780b23da26763e0ee34dfb94f98c9" +dependencies = [ + "git-version", + "shell-escape", + "sys-info", +] + +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name = "bytecount" +version = "0.6.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" + +[[package]] +name = "bytemuck" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +dependencies = [ + "serde", +] + +[[package]] +name = "bytesize" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" + +[[package]] +name = "cache-padded" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "981520c98f422fcc584dc1a95c334e6953900b9106bc47a9839b81790009eb21" + +[[package]] +name = "calloop" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fba7adb4dd5aa98e5553510223000e7148f621165ec5f9acd7113f6ca4995298" +dependencies = [ + "bitflags 2.5.0", + "log", + "polling 3.7.0", + "rustix 0.38.34", + "slab", + "thiserror", +] + +[[package]] +name = "calloop-wayland-source" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0ea9b9476c7fad82841a8dbb380e2eae480c21910feba80725b46931ed8f02" +dependencies = [ + "calloop", + "rustix 0.38.34", + "wayland-backend", + "wayland-client", +] + +[[package]] +name = "camino" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", +] + +[[package]] +name = "cargo_metadata" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "cc" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" +dependencies = [ + "jobserver", + "libc", + "once_cell", +] + +[[package]] +name = "cdr-encoding-size" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "ac9ba34127578914d8674773f48f686c1df8f37bba0f5b88ab5c78ca04b7f2fb" +dependencies = [ + "cdr-encoding-size-derive", +] + +[[package]] +name = "cdr-encoding-size-derive" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c46953796c44a6488a02ce4b4b3dc0ff30c3cf9107a4ed5e17ceac875d1e9fb6" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + +[[package]] +name = "cfb" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38f2da7a0a2c4ccf0065be06397cc26a81f4e528be095826eee9d4adbb8c60f" +dependencies = [ + "byteorder", + "fnv", + "uuid", +] + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chrono" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-targets 0.52.5", +] + +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + +[[package]] +name = "clang-format" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "696283b40e1a39d208ee614b92e5f6521d16962edeb47c48372585ec92419943" +dependencies = [ + "thiserror", +] + +[[package]] +name = "clap" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +dependencies = [ + "atty", + "bitflags 1.3.2", + "clap_derive 3.2.25", + "clap_lex 0.2.4", + "indexmap 1.9.3", + "once_cell", + "strsim 0.10.0", + "termcolor", + "textwrap", +] + +[[package]] +name = "clap" +version = "4.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9689a29b593160de5bc4aacab7b5d54fb52231de70122626c178e6a368994c7" +dependencies = [ + "clap_builder", + "clap_derive 4.5.5", +] + +[[package]] +name = "clap_builder" +version = "4.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e5387378c84f6faa26890ebf9f0a92989f8873d4d380467bcd0d8d8620424df" +dependencies = [ + "anstream", + "anstyle", + "clap_lex 0.7.0", + "strsim 0.11.1", + "terminal_size", +] + +[[package]] +name = "clap_derive" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "clap_derive" +version = "4.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "clap_lex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +dependencies = [ + "os_str_bytes", +] + +[[package]] +name = "clap_lex" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" + +[[package]] +name = "clean-path" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aaa6b4b263a5d737e9bf6b7c09b72c41a5480aec4d7219af827f6564e950b6a5" + +[[package]] +name = "clipboard-win" +version = "5.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79f4473f5144e20d9aceaf2972478f06ddf687831eafeeb434fbaf0acc4144ad" +dependencies = [ + "error-code", +] + +[[package]] +name = "clircle" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8e87cbed5354f17bd8ca8821a097fb62599787fe8f611743fad7ee156a0a600" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "serde", + "winapi 0.3.9", +] + +[[package]] +name = "cocoa" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6140449f97a6e97f9511815c5632d84c8aacf8ac271ad77c559218161a1373c" +dependencies = [ + "bitflags 1.3.2", + "block", + "cocoa-foundation", + "core-foundation", + "core-graphics", + "foreign-types", + "libc", + "objc", +] + +[[package]] +name = "cocoa-foundation" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c6234cbb2e4c785b456c0644748b1ac416dd045799740356f8363dfe00c93f7" +dependencies = [ + "bitflags 1.3.2", + "block", + "core-foundation", + "core-graphics-types", + "libc", + "objc", +] + +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + "unicode-width", +] + +[[package]] +name = "color_quant" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" + +[[package]] +name = "colorchoice" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" + +[[package]] +name = "com" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e17887fd17353b65b1b2ef1c526c83e26cd72e74f598a8dc1bee13a48f3d9f6" +dependencies = [ + "com_macros", +] + +[[package]] +name = "com_macros" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d375883580a668c7481ea6631fc1a8863e33cc335bf56bfad8d7e6d4b04b13a5" +dependencies = [ + "com_macros_support", + "proc-macro2", + "syn 1.0.109", +] + +[[package]] +name = "com_macros_support" +version = "0.6.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad899a1087a9296d5644792d7cb72b8e34c1bec8e7d4fbc002230169a6e8710c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "comfy-table" +version = "7.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7" +dependencies = [ + "strum 0.26.2", + "strum_macros 0.26.2", + "unicode-width", +] + +[[package]] +name = "communication-layer-pub-sub" +version = "0.3.4" +dependencies = [ + "flume 0.10.14", + "zenoh", +] + +[[package]] +name = "communication-layer-request-reply" +version = "0.3.4" + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "console" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +dependencies = [ + "encode_unicode", + "lazy_static", + "libc", + "unicode-width", + "windows-sys 0.52.0", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom", + "once_cell", + "tiny-keccak", +] + +[[package]] +name = "content_inspector" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7bda66e858c683005a53a9a60c69a4aca7eeaa45d124526e389f7aec8e62f38" +dependencies = [ + "memchr", +] + +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" + +[[package]] +name = "core-graphics" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c07782be35f9e1140080c6b96f0d44b739e2278479f64e02fdab4e32dfd8b081" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "core-graphics-types", + "foreign-types", + "libc", +] + +[[package]] +name = "core-graphics-types" +version = "0.1.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45390e6114f68f718cc7a830514a96f903cccd70d02a8f6d9f643ac4ba45afaf" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "libc", +] + +[[package]] +name = "cpufeatures" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "crossbeam" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + +[[package]] +name = "crossterm" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64e6c0fbe2c17357405f7c758c1ef960fce08bdfb2c03d88d2a18d7e09c4b67" +dependencies = [ + "bitflags 1.3.2", + "crossterm_winapi", + "libc", + "mio 0.8.11", + "parking_lot", + "signal-hook", + "signal-hook-mio", + "winapi 0.3.9", +] + +[[package]] +name = "crossterm_winapi" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "csv" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + 
+[[package]] +name = "csv-core" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" +dependencies = [ + "memchr", +] + +[[package]] +name = "ctrlc" +version = "3.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "672465ae37dc1bc6380a6547a8883d5dd397b0f1faaad4f265726cc7042a5345" +dependencies = [ + "nix 0.28.0", + "windows-sys 0.52.0", +] + +[[package]] +name = "cursor-icon" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96a6ac251f4a2aca6b3f91340350eab87ae57c3f127ffeb585e92bd336717991" + +[[package]] +name = "cxx" +version = "1.0.123" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8194f089b6da4751d6c1da1ef37c17255df51f9346cdb160f8b096562ae4a85c" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.123" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e8df9a089caae66634d754672d5f909395f30f38af6ff19366980d8a8b57501" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn 2.0.65", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.123" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25290be4751803672a70b98c68b51c1e7d0a640ab5a4377f240f9d2e70054cd1" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.123" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8cb317cb13604b4752416783bb25070381c36e844743e4146b7f8e55de7d140" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "darling" +version = "0.20.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.11.1", + "syn 2.0.65", +] + +[[package]] +name = "darling_macro" +version = "0.20.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if 1.0.0", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "data-encoding" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" + +[[package]] +name = "der" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", + "serde", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "directories-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" +dependencies = [ + "cfg-if 1.0.0", + "dirs-sys-next", +] + +[[package]] +name = "dirs" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" +dependencies = [ + "dirs-sys 0.3.7", +] + +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys 0.4.1", +] + +[[package]] +name = "dirs-sys" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +dependencies = [ + "libc", + "redox_users", + "winapi 0.3.9", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi 0.3.9", +] + +[[package]] +name = "dispatch" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b" + +[[package]] +name = "dlib" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "330c60081dcc4c72131f8eb70510f1ac07223e5d4163db481a04a0befcffa412" +dependencies = [ + "libloading 0.8.3", +] + +[[package]] +name = "document-features" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef5282ad69563b5fc40319526ba27e0e7363d552a896f0297d54f767717f9b95" +dependencies = [ + "litrs", +] + +[[package]] +name = "dora-arrow-convert" +version = "0.3.4" +dependencies = [ + "arrow", + "eyre", +] + +[[package]] +name = "dora-cli" +version = "0.3.4" +dependencies = [ + "bat", + "clap 4.5.6", + "communication-layer-request-reply", + "ctrlc", + "dora-coordinator", + "dora-core", + "dora-daemon", + "dora-node-api-c", + "dora-operator-api-c", + "dora-runtime", + "dora-tracing", + "duration-str", + "eyre", + "futures", + "inquire", + "notify 5.2.0", + "serde", + "serde_json", + "serde_yaml 0.9.34+deprecated", + "termcolor", + "tokio", + "tokio-stream", + "tracing", + "uuid", + "webbrowser", +] + +[[package]] +name = "dora-coordinator" +version = "0.3.4" 
+dependencies = [ + "ctrlc", + "dora-core", + "dora-tracing", + "eyre", + "futures", + "futures-concurrency", + "names", + "serde_json", + "tokio", + "tokio-stream", + "tracing", + "uuid", +] + +[[package]] +name = "dora-core" +version = "0.3.4" +dependencies = [ + "aligned-vec", + "dora-message", + "eyre", + "once_cell", + "schemars", + "serde", + "serde-with-expand-env", + "serde_json", + "serde_yaml 0.9.34+deprecated", + "tokio", + "tracing", + "uuid", + "which", +] + +[[package]] +name = "dora-daemon" +version = "0.3.4" +dependencies = [ + "aligned-vec", + "async-trait", + "bincode", + "ctrlc", + "dora-arrow-convert", + "dora-core", + "dora-download", + "dora-node-api", + "dora-tracing", + "eyre", + "flume 0.10.14", + "futures", + "futures-concurrency", + "serde_json", + "serde_yaml 0.8.26", + "shared-memory-server", + "sysinfo 0.30.12", + "tokio", + "tokio-stream", + "tracing", + "tracing-opentelemetry", + "uuid", + "which", +] + +[[package]] +name = "dora-download" +version = "0.3.4" +dependencies = [ + "eyre", + "reqwest", + "tokio", + "tracing", +] + +[[package]] +name = "dora-examples" +version = "0.0.0" +dependencies = [ + "dora-coordinator", + "dora-core", + "dora-download", + "dora-tracing", + "dunce", + "eyre", + "futures", + "serde_yaml 0.8.26", + "tokio", + "tokio-stream", + "tracing", + "uuid", +] + +[[package]] +name = "dora-message" +version = "0.3.4" +dependencies = [ + "arrow-data", + "arrow-schema", + "eyre", + "serde", + "uhlc", +] + +[[package]] +name = "dora-metrics" +version = "0.3.4" +dependencies = [ + "eyre", + "opentelemetry 0.22.0", + "opentelemetry-otlp", + "opentelemetry-system-metrics", + "opentelemetry_sdk 0.22.1", +] + +[[package]] +name = "dora-node-api" +version = "0.3.4" +dependencies = [ + "aligned-vec", + "arrow", + "bincode", + "dora-arrow-convert", + "dora-core", + "dora-tracing", + "eyre", + "flume 0.10.14", + "futures", + "futures-concurrency", + "futures-timer", + "serde_yaml 0.8.26", + "shared-memory-server", + "shared_memory_extended", + "tokio", + "tracing", +] + +[[package]] +name = "dora-node-api-c" +version = "0.3.4" +dependencies = [ + "arrow-array", + "dora-node-api", + "eyre", + "tracing", +] + +[[package]] +name = "dora-node-api-cxx" +version = "0.3.4" +dependencies = [ + "cxx", + "cxx-build", + "dora-node-api", + "dora-ros2-bridge", + "dora-ros2-bridge-msg-gen", + "eyre", + "futures-lite 2.3.0", + "prettyplease 0.1.25", + "rust-format", + "serde", + "serde-big-array", +] + +[[package]] +name = "dora-node-api-python" +version = "0.3.4" +dependencies = [ + "arrow", + "dora-node-api", + "dora-operator-api-python", + "dora-ros2-bridge-python", + "dora-runtime", + "eyre", + "flume 0.10.14", + "futures", + "pyo3", + "pythonize", + "serde_yaml 0.8.26", +] + +[[package]] +name = "dora-operator-api" +version = "0.3.4" +dependencies = [ + "dora-arrow-convert", + "dora-operator-api-macros", + "dora-operator-api-types", +] + +[[package]] +name = "dora-operator-api-c" +version = "0.3.4" +dependencies = [ + "dora-operator-api-types", +] + +[[package]] +name = "dora-operator-api-cxx" +version = "0.3.4" +dependencies = [ + "cxx", + "cxx-build", + "dora-operator-api", +] + +[[package]] +name = "dora-operator-api-macros" +version = "0.3.4" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "dora-operator-api-python" +version = "0.3.4" +dependencies = [ + "aligned-vec", + "arrow", + "arrow-schema", + "dora-node-api", + "eyre", + "flume 0.10.14", + "pyo3", + "serde_yaml 0.8.26", +] + +[[package]] +name = 
"dora-operator-api-types" +version = "0.3.4" +dependencies = [ + "arrow", + "dora-arrow-convert", + "safer-ffi", +] + +[[package]] +name = "dora-record" +version = "0.3.4" +dependencies = [ + "chrono", + "dora-node-api", + "dora-tracing", + "eyre", + "parquet", + "tokio", +] + +[[package]] +name = "dora-rerun" +version = "0.3.4" +dependencies = [ + "dora-node-api", + "eyre", + "ndarray", + "rerun", + "tokio", +] + +[[package]] +name = "dora-ros2-bridge" +version = "0.1.0" +dependencies = [ + "array-init", + "dora-daemon", + "dora-ros2-bridge-msg-gen", + "eyre", + "flume 0.11.0", + "futures", + "futures-timer", + "rand", + "ros2-client", + "rust-format", + "rustdds", + "serde", + "serde-big-array", + "tokio", + "tracing", + "tracing-subscriber", + "widestring", +] + +[[package]] +name = "dora-ros2-bridge-msg-gen" +version = "0.1.0" +dependencies = [ + "anyhow", + "glob", + "heck 0.3.3", + "nom", + "proc-macro2", + "quote", + "regex", + "syn 1.0.109", + "thiserror", + "tracing", +] + +[[package]] +name = "dora-ros2-bridge-python" +version = "0.1.0" +dependencies = [ + "arrow", + "dora-ros2-bridge", + "dora-ros2-bridge-msg-gen", + "eyre", + "futures", + "pyo3", + "serde", + "serde_assert", +] + +[[package]] +name = "dora-runtime" +version = "0.3.4" +dependencies = [ + "aligned-vec", + "arrow", + "dora-core", + "dora-download", + "dora-metrics", + "dora-node-api", + "dora-operator-api-python", + "dora-operator-api-types", + "dora-tracing", + "eyre", + "flume 0.10.14", + "futures", + "futures-concurrency", + "libloading 0.7.4", + "pyo3", + "pythonize", + "serde_yaml 0.8.26", + "tokio", + "tokio-stream", + "tracing", + "tracing-opentelemetry", +] + +[[package]] +name = "dora-tracing" +version = "0.3.4" +dependencies = [ + "eyre", + "opentelemetry 0.18.0", + "opentelemetry-jaeger", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", +] + +[[package]] +name = "downcast-rs" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" + +[[package]] +name = "dunce" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" + +[[package]] +name = "duration-str" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9f037c488d179e21c87ef5fa9c331e8e62f5dddfa84618b41bb197da03edff1" +dependencies = [ + "chrono", + "nom", + "rust_decimal", + "serde", + "thiserror", + "time", +] + +[[package]] +name = "dyn-clone" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" + +[[package]] +name = "ecolor" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20930a432bbd57a6d55e07976089708d4893f3d556cf42a0d79e9e321fa73b10" +dependencies = [ + "bytemuck", + "serde", +] + +[[package]] +name = "eframe" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020e2ccef6bbcec71dbc542f7eed64a5846fc3076727f5746da8fd307c91bab2" +dependencies = [ + "bytemuck", + "cocoa", + "directories-next", + "document-features", + "egui", + "egui-wgpu", + "egui-winit", + "egui_glow", + "image 0.24.9", + "js-sys", + "log", + "objc", + "parking_lot", + "percent-encoding", + "pollster", + "puffin", + "raw-window-handle 0.6.2", + "ron", + "serde", + "static_assertions", + "thiserror", + 
"wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "web-time", + "wgpu", + "winapi 0.3.9", + "winit", +] + +[[package]] +name = "egui" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "584c5d1bf9a67b25778a3323af222dbe1a1feb532190e103901187f92c7fe29a" +dependencies = [ + "accesskit", + "ahash", + "backtrace", + "epaint", + "log", + "nohash-hasher", + "puffin", + "ron", + "serde", +] + +[[package]] +name = "egui-wgpu" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469ff65843f88a702b731a1532b7d03b0e8e96d283e70f3a22b0e06c46cb9b37" +dependencies = [ + "bytemuck", + "document-features", + "egui", + "epaint", + "log", + "puffin", + "thiserror", + "type-map", + "web-time", + "wgpu", + "winit", +] + +[[package]] +name = "egui-winit" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e3da0cbe020f341450c599b35b92de4af7b00abde85624fd16f09c885573609" +dependencies = [ + "accesskit_winit", + "arboard", + "egui", + "log", + "puffin", + "raw-window-handle 0.6.2", + "serde", + "smithay-clipboard", + "web-time", + "webbrowser", + "winit", +] + +[[package]] +name = "egui_commonmark" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e30fc4d40a8ef399a8debfbdae0462ca45912d81b9e81b24373337669e961201" +dependencies = [ + "egui", + "egui_extras", + "pulldown-cmark 0.10.3", +] + +[[package]] +name = "egui_extras" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b78779f35ded1a853786c9ce0b43fe1053e10a21ea3b23ebea411805ce41593" +dependencies = [ + "egui", + "ehttp", + "enum-map", + "image 0.24.9", + "log", + "mime_guess2", + "puffin", + "serde", +] + +[[package]] +name = "egui_glow" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0e5d975f3c86edc3d35b1db88bb27c15dde7c55d3b5af164968ab5ede3f44ca" +dependencies = [ + "bytemuck", + "egui", + "egui-winit", + "glow", + "log", + "memoffset 0.9.1", + "puffin", + "wasm-bindgen", + "web-sys", + "winit", +] + +[[package]] +name = "egui_plot" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7854b86dc1c2d352c5270db3d600011daa913d6b554141a03939761323288a1" +dependencies = [ + "egui", +] + +[[package]] +name = "egui_tiles" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e2c0ff99daddcbdc54b141dbb7be3b014463da48a03ebc801bf63e500b23d75" +dependencies = [ + "ahash", + "egui", + "itertools 0.12.1", + "log", + "serde", +] + +[[package]] +name = "ehttp" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59a81c221a1e4dad06cb9c9deb19aea1193a5eea084e8cd42d869068132bf876" +dependencies = [ + "document-features", + "futures-util", + "js-sys", + "ureq", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", + "web-sys", +] + +[[package]] +name = "either" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" + +[[package]] +name = "emath" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c3a552cfca14630702449d35f41c84a0d15963273771c6059175a803620f3f" +dependencies = [ + "bytemuck", + "serde", +] + +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + +[[package]] +name = "encoding_rs" +version = "0.8.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "enum-map" +version = "2.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6866f3bfdf8207509a033af1a75a7b08abda06bbaaeae6669323fd5a097df2e9" +dependencies = [ + "enum-map-derive", + "serde", +] + +[[package]] +name = "enum-map-derive" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "enumflags2" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3278c9d5fb675e0a51dabcf4c0d355f692b064171535ba72361be1528a9d8e8d" +dependencies = [ + "enumflags2_derive", + "serde", +] + +[[package]] +name = "enumflags2_derive" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c785274071b1b420972453b306eeca06acf4633829db4223b58a2a8c5953bc4" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "enumn" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "enumset" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "226c0da7462c13fb57e5cc9e0dc8f0635e7d27f276a3a7fd30054647f669007d" +dependencies = [ + "enumset_derive", +] + +[[package]] +name = "enumset_derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "env_logger" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" +dependencies = [ + "humantime", + "is-terminal", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "epaint" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b381f8b149657a4acf837095351839f32cd5c4aec1817fc4df84e18d76334176" +dependencies = [ + "ab_glyph", + "ahash", + "bytemuck", + "ecolor", + "emath", + "log", + "nohash-hasher", + "parking_lot", + "puffin", + "rayon", + "serde", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "error-chain" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" +dependencies = [ + "version_check", +] + 
+[[package]] +name = "error-code" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0474425d51df81997e2f90a21591180b38eccf27292d755f3e30750225c175b" + +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +dependencies = [ + "cfg-if 1.0.0", + "home", + "windows-sys 0.48.0", +] + +[[package]] +name = "ethnum" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b90ca2580b73ab6a1f724b76ca11ab632df820fd6040c336200d2c1df7b3c82c" + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.3.0", + "pin-project-lite", +] + +[[package]] +name = "ewebsock" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6177769715c6ec5a324acee995183b22721ea23c58e49af14a828eadec85d120" +dependencies = [ + "document-features", + "js-sys", + "log", + "tungstenite 0.21.0", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "ext-trait" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d772df1c1a777963712fb68e014235e80863d6a91a85c4e06ba2d16243a310e5" +dependencies = [ + "ext-trait-proc_macros", +] + +[[package]] +name = "ext-trait-proc_macros" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ab7934152eaf26aa5aa9f7371408ad5af4c31357073c9e84c3b9d7f11ad639a" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "extension-traits" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a296e5a895621edf9fa8329c83aa1cb69a964643e36cf54d8d7a69b789089537" +dependencies = [ + "ext-trait", +] + +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "fastrand" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" + +[[package]] +name = "fdeflate" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f9bfee30e4dedf0ab8b422f03af778d9612b63f502710fc500a334ebe2de645" +dependencies = [ + "simd-adler32", +] + +[[package]] +name = "filetime" +version = "0.2.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall 0.4.1", + "windows-sys 0.52.0", +] + +[[package]] +name = "fixed" +version = "1.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fc715d38bea7b5bf487fcd79bcf8c209f0b58014f3018a7a19c2b855f472048" +dependencies = [ + "az", + "bytemuck", + "half", + "serde", + "typenum", +] + +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "flatbuffers" +version = "23.5.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dac53e22462d78c16d64a1cd22371b54cc3fe94aa15e7886a2fa6e5d1ab8640" +dependencies = [ + "bitflags 1.3.2", + "rustc_version", +] + +[[package]] +name = "flatbuffers" +version = "24.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8add37afff2d4ffa83bc748a70b4b1370984f6980768554182424ef71447c35f" +dependencies = [ + "bitflags 1.3.2", + "rustc_version", +] + +[[package]] +name = "flate2" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "flume" +version = "0.10.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577" +dependencies = [ + "futures-core", + "futures-sink", + "nanorand", + "pin-project", + "spin 0.9.8", +] + +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "futures-core", + "futures-sink", + "nanorand", + "spin 0.9.8", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" +dependencies = [ + "foreign-types-macros", + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" + +[[package]] +name = "foreign_vec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee1b05cbd864bcaecbd3455d6d967862d446e4ebfc3c2e5e5b9841e53cba6673" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fsevent-sys" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2" +dependencies = [ + "libc", +] + +[[package]] +name = "fuchsia-zircon" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" +dependencies = [ + "bitflags 1.3.2", + "fuchsia-zircon-sys", +] + +[[package]] +name = "fuchsia-zircon-sys" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-buffered" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02dcae03ee5afa5ea17b1aebc793806b8ddfc6dc500e0b8e8e1eb30b9dad22c0" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", +] + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-concurrency" +version = "7.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b14ac911e85d57c5ea6eef76d7b4d4a3177ecd15f4bea2e61927e9e3823e19f" +dependencies = [ + "bitvec", + "futures-buffered", + "futures-core", + "futures-lite 1.13.0", + "pin-project", + "slab", + "smallvec", +] + +[[package]] +name = "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-executor" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", + "num_cpus", +] + +[[package]] +name = "futures-io" +version = "0.3.30" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "fastrand 2.1.0", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = "futures-macro" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "gethostname" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0176e0459c2e4a1fe232f984bca6890e681076abb9934f6cea7c326f3fc47818" +dependencies = [ + "libc", + "windows-targets 0.48.5", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if 1.0.0", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "git-version" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ad568aa3db0fcbc81f2f116137f263d7304f512a1209b35b85150d3ef88ad19" +dependencies = [ + "git-version-macro", +] + +[[package]] +name = "git-version-macro" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" +dependencies = [ + "proc-macro2", + 
"quote", + "syn 2.0.65", +] + +[[package]] +name = "git2" +version = "0.18.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "232e6a7bfe35766bf715e55a88b39a700596c0ccfd88cd3680b4cdb40d66ef70" +dependencies = [ + "bitflags 2.5.0", + "libc", + "libgit2-sys", + "log", + "url", +] + +[[package]] +name = "gl_generator" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a95dfc23a2b4a9a2f5ab41d194f8bfda3cabec42af4e39f08c339eb2a0c124d" +dependencies = [ + "khronos_api", + "log", + "xml-rs", +] + +[[package]] +name = "glam" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f597d56c1bd55a811a1be189459e8fad2bbc272616375602443bdfb37fa774" +dependencies = [ + "bytemuck", + "serde", +] + +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + +[[package]] +name = "globset" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1" +dependencies = [ + "aho-corasick", + "bstr 1.9.1", + "log", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", +] + +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "glow" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd348e04c43b32574f2de31c8bb397d96c9fcfa1371bd4ca6d8bdc464ab121b1" +dependencies = [ + "js-sys", + "slotmap", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "gltf" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ce1918195723ce6ac74e80542c5a96a40c2b26162c1957a5cd70799b8cacf7" +dependencies = [ + "base64 0.13.1", + "byteorder", + "gltf-json", + "image 0.25.1", + "lazy_static", + "serde_json", + "urlencoding", +] + +[[package]] +name = "gltf-derive" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14070e711538afba5d6c807edb74bcb84e5dbb9211a3bf5dea0dfab5b24f4c51" +dependencies = [ + "inflections", + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "gltf-json" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6176f9d60a7eab0a877e8e96548605dedbde9190a7ae1e80bbcc1c9af03ab14" +dependencies = [ + "gltf-derive", + "serde", + "serde_derive", + "serde_json", +] + +[[package]] +name = "glutin_wgl_sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8098adac955faa2d31079b65dc48841251f69efd3ac25477903fc424362ead" +dependencies = [ + "gl_generator", +] + +[[package]] +name = "gpu-alloc" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbcd2dba93594b227a1f57ee09b8b9da8892c34d55aa332e034a228d0fe6a171" +dependencies = [ + "bitflags 2.5.0", + "gpu-alloc-types", +] + +[[package]] +name = "gpu-alloc-types" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98ff03b468aa837d70984d55f5d3f846f6ec31fe34bbb97c4f85219caeee1ca4" +dependencies = [ + "bitflags 2.5.0", +] + +[[package]] +name = 
"gpu-allocator" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f56f6318968d03c18e1bcf4857ff88c61157e9da8e47c5f29055d60e1228884" +dependencies = [ + "log", + "presser", + "thiserror", + "winapi 0.3.9", + "windows 0.52.0", +] + +[[package]] +name = "gpu-descriptor" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc11df1ace8e7e564511f53af41f3e42ddc95b56fd07b3f4445d2a6048bc682c" +dependencies = [ + "bitflags 2.5.0", + "gpu-descriptor-types", + "hashbrown 0.14.5", +] + +[[package]] +name = "gpu-descriptor-types" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bf0b36e6f090b7e1d8a4b49c0cb81c1f8376f72198c65dd3ad9ff3556b8b78c" +dependencies = [ + "bitflags 2.5.0", +] + +[[package]] +name = "grep-cli" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea40788c059ab8b622c4d074732750bfb3bd2912e2dd58eabc11798a4d5ad725" +dependencies = [ + "bstr 1.9.1", + "globset", + "libc", + "log", + "termcolor", + "winapi-util", +] + +[[package]] +name = "h2" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap 2.2.6", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "bytemuck", + "cfg-if 1.0.0", + "crunchy", + "num-traits", +] + +[[package]] +name = "hash_hasher" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74721d007512d0cb3338cd20f0654ac913920061a4c4d0d8708edb3f2a698c0c" + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "allocator-api2", +] + +[[package]] +name = "hassle-rs" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af2a7e73e1f34c48da31fb668a907f250794837e08faa144fd24f0b8b741e890" +dependencies = [ + "bitflags 2.5.0", + "com", + "libc", + "libloading 0.8.3", + "thiserror", + "widestring", + "winapi 0.3.9", +] + +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hexf-parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfa686283ad6dd069f105e5ab091b04c62850d3e4cf5d67debad1933f55023df" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" +dependencies = [ + "bytes", + "futures-core", + "http 1.1.0", + "http-body 1.0.0", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "hyper" +version = "0.14.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.5.7", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] 
+name = "hyper" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "httparse", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" +dependencies = [ + "futures-util", + "http 1.1.0", + "hyper 1.3.1", + "hyper-util", + "rustls 0.22.4", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper 0.14.28", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "hyper-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "hyper 1.3.1", + "pin-project-lite", + "socket2 0.5.7", + "tokio", + "tower", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icrate" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d3aaff8a54577104bafdf686ff18565c3b6903ca5782a2026ef06e2c7aa319" +dependencies = [ + "block2 0.3.0", + "dispatch", + "objc2 0.4.1", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "if-addrs" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624c5448ba529e74f594c65b7024f31b2de7b64a9b228b8df26796bbb6e32c36" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "image" +version = "0.24.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5690139d2f55868e080017335e4b94cb7414274c74f1669c84fb5feba2c9f69d" +dependencies = [ + "bytemuck", + "byteorder", + "color_quant", + "num-traits", + "png", +] + +[[package]] +name = "image" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd54d660e773627692c524beaad361aca785a4f9f5730ce91f42aabe5bce3d11" +dependencies = [ + 
"bytemuck", + "byteorder", + "num-traits", + "png", + "tiff", + "zune-core", + "zune-jpeg", +] + +[[package]] +name = "indent" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9f1a0777d972970f204fdf8ef319f1f4f8459131636d7e3c96c5d59570d0fa6" + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +dependencies = [ + "equivalent", + "hashbrown 0.14.5", + "serde", +] + +[[package]] +name = "indoc" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" + +[[package]] +name = "infer" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb33622da908807a06f9513c19b3c1ad50fab3e4137d82a78107d502075aa199" +dependencies = [ + "cfb", +] + +[[package]] +name = "inflections" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a257582fdcde896fd96463bf2d40eefea0580021c0712a0e2b028b60b47a837a" + +[[package]] +name = "inotify" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8069d3ec154eb856955c1c0fbffefbf5f3c40a104ec912d4797314c1801abff" +dependencies = [ + "bitflags 1.3.2", + "inotify-sys", + "libc", +] + +[[package]] +name = "inotify-sys" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" +dependencies = [ + "libc", +] + +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", +] + +[[package]] +name = "inquire" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a94f0659efe59329832ba0452d3ec753145fc1fb12a8e1d60de4ccf99f5364" +dependencies = [ + "bitflags 1.3.2", + "crossterm", + "dyn-clone", + "lazy_static", + "newline-converter", + "thiserror", + "unicode-segmentation", + "unicode-width", +] + +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "integer-encoding" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" + +[[package]] +name = "inventory" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f958d3d68f4167080a18141e10381e7634563984a537f2a49a30fd8e53ac5767" + +[[package]] +name = "io-extras" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c9f046b9af244f13b3bd939f55d16830ac3a201e8a9ba9661bfcb03e2be72b9b" +dependencies = [ + "io-lifetimes 2.0.3", + "windows-sys 0.52.0", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "io-lifetimes" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a611371471e98973dbcab4e0ec66c31a10bc356eeb4d54a0e05eac8158fe38c" + +[[package]] +name = "iovec" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" +dependencies = [ + "libc", +] + +[[package]] +name = "ipnet" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" + +[[package]] +name = "ipnetwork" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f84f1612606f3753f205a4e9a2efd6fe5b4c573a6269b2cc6c3003d44a0d127" +dependencies = [ + "serde", +] + +[[package]] +name = "is-terminal" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if 1.0.0", + "combine", + "jni-sys", + "log", + "thiserror", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + +[[package]] +name = "jobserver" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" +dependencies = [ + "libc", +] + +[[package]] +name = "jpeg-decoder" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" + +[[package]] +name = "js-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "json5" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" +dependencies = [ + "pest", + "pest_derive", + "serde", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "kernel32-sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] + +[[package]] +name = "khronos-egl" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6aae1df220ece3c0ada96b8153459b67eebe9ae9212258bb0134ae60416fdf76" +dependencies = [ + "libc", + "libloading 0.8.3", + "pkg-config", +] + +[[package]] +name = "khronos_api" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2db585e1d738fc771bf08a151420d3ed193d9d895a36df7f6f8a9456b911ddc" + +[[package]] +name = "kqueue" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7447f1ca1b7b563588a205fe93dea8df60fd981423a768bc1c0ded35ed147d0c" +dependencies = [ + "kqueue-sys", + "libc", +] + +[[package]] +name = "kqueue-sys" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b" +dependencies = [ + "bitflags 1.3.2", + "libc", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin 0.5.2", +] + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "lexical-core" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cde5de06e8d4c2faabc400238f9ae1c74d5412d03a7bd067645ccbc47070e46" +dependencies = [ + "lexical-parse-float", + "lexical-parse-integer", + "lexical-util", + "lexical-write-float", + "lexical-write-integer", +] + +[[package]] +name = "lexical-parse-float" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683b3a5ebd0130b8fb52ba0bdc718cc56815b6a097e28ae5a6997d0ad17dc05f" +dependencies = [ + "lexical-parse-integer", + "lexical-util", + "static_assertions", +] + +[[package]] +name = "lexical-parse-integer" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d0994485ed0c312f6d965766754ea177d07f9c00c9b82a5ee62ed5b47945ee9" +dependencies = [ 
+ "lexical-util", + "static_assertions", +] + +[[package]] +name = "lexical-util" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5255b9ff16ff898710eb9eb63cb39248ea8a5bb036bea8085b1a767ff6c4e3fc" +dependencies = [ + "static_assertions", +] + +[[package]] +name = "lexical-write-float" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accabaa1c4581f05a3923d1b4cfd124c329352288b7b9da09e766b0668116862" +dependencies = [ + "lexical-util", + "lexical-write-integer", + "static_assertions", +] + +[[package]] +name = "lexical-write-integer" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b6f3d1f4422866b68192d62f77bc5c700bee84f3069f2469d7bc8c77852446" +dependencies = [ + "lexical-util", + "static_assertions", +] + +[[package]] +name = "libc" +version = "0.2.155" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" + +[[package]] +name = "libgit2-sys" +version = "0.16.2+1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee4126d8b4ee5c9d9ea891dd875cfdc1e9d0950437179104b183d7d8a74d24e8" +dependencies = [ + "cc", + "libc", + "libz-sys", + "pkg-config", +] + +[[package]] +name = "libloading" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +dependencies = [ + "cfg-if 1.0.0", + "winapi 0.3.9", +] + +[[package]] +name = "libloading" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" +dependencies = [ + "cfg-if 1.0.0", + "windows-targets 0.48.5", +] + +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "libredox" +version = "0.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3af92c55d7d839293953fcd0fda5ecfe93297cfde6ffbdec13b41d99c0ba6607" +dependencies = [ + "bitflags 2.5.0", + "libc", + "redox_syscall 0.4.1", +] + +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.5.0", + "libc", +] + +[[package]] +name = "libz-sys" +version = "1.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "line-wrap" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd1bc4d24ad230d21fb898d1116b1801d7adfc449d42026475862ab48b11e70e" + +[[package]] +name = "link-cplusplus" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d240c6f7e1ba3a28b0249f774e6a9dd0175054b52dfbb61b16eb8505c3785c9" +dependencies = [ + "cc", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + +[[package]] +name = "litrs" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5" + +[[package]] +name = "local-ip-address" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136ef34e18462b17bf39a7826f8f3bbc223341f8e83822beb8b77db9a3d49696" +dependencies = [ + "libc", + "neli", + "thiserror", + "windows-sys 0.48.0", +] + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", + "serde", +] + +[[package]] +name = "log" +version = "0.4.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +dependencies = [ + "value-bag", +] + +[[package]] +name = "log-once" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d8a05e3879b317b1b6dbf353e5bba7062bedcc59815267bb23eaa0c576cebf0" +dependencies = [ + "log", +] + +[[package]] +name = "lz4_flex" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5" +dependencies = [ + "twox-hash", +] + +[[package]] +name = "macaw" +version = "0.18.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fdbfdf07a7e53090afb7fd427eb0a4b46fc51cb484b2deba27b47919762dfb" +dependencies = [ + "glam", + "num-traits", + "serde", +] + +[[package]] +name = "macro_rules_attribute" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf0c9b980bf4f3a37fd7b1c066941dd1b1d0152ce6ee6e8fe8c49b9f6810d862" +dependencies = [ + "macro_rules_attribute-proc_macro", + "paste", +] + +[[package]] +name = "macro_rules_attribute-proc_macro" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58093314a45e00c77d5c508f76e77c3396afbbc0d01506e7fae47b018bac2b1d" + +[[package]] +name = "malloc_buf" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" +dependencies = [ + "libc", +] + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "matrixmultiply" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7574c1cf36da4798ab73da5b215bbf444f50718207754cb522201d78d1cd0ff2" +dependencies = [ + "autocfg", + "rawpointer", +] + +[[package]] +name = "md5" +version = "0.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" + +[[package]] +name = "memchr" +version = "2.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" + +[[package]] +name = "memmap2" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322" +dependencies = [ + "libc", +] + +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] + +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +dependencies = [ + "autocfg", +] + +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "memory-stats" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34f79cf9964c5c9545493acda1263f1912f8d2c56c8a2ffee2606cb960acaacc" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "metal" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c43f73953f8cbe511f021b58f18c3ce1c3d1ae13fe953293e13345bf83217f25" +dependencies = [ + "bitflags 2.5.0", + "block", + "core-graphics-types", + "foreign-types", + "log", + "objc", + "paste", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mime_guess2" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25a3333bb1609500601edc766a39b4c1772874a4ce26022f4d866854dc020c41" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" +dependencies = [ + "adler", + "simd-adler32", +] + +[[package]] +name = "mio" +version = "0.6.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" +dependencies = [ + "cfg-if 0.1.10", + "fuchsia-zircon", + "fuchsia-zircon-sys", + "iovec", + "kernel32-sys", + "libc", + "log", + "miow", + "net2", + "slab", + "winapi 0.2.8", +] + +[[package]] +name = "mio" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +dependencies = [ + "libc", + "log", + "wasi", + "windows-sys 0.48.0", +] + +[[package]] +name = "mio-extras" +version = "2.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" +dependencies = [ + "lazycell", + "log", + "mio 0.6.23", + "slab", +] + +[[package]] +name = "miow" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" +dependencies = [ + "kernel32-sys", + "net2", + "winapi 0.2.8", + "ws2_32-sys", +] + +[[package]] +name = "multiple-daemons-example-node" +version = "0.3.4" +dependencies = [ + "dora-node-api", + "eyre", + "futures", + "rand", + "tokio", +] + +[[package]] +name = "multiple-daemons-example-operator" +version = "0.3.4" +dependencies = [ + "dora-operator-api", +] + +[[package]] +name = "multiple-daemons-example-sink" +version = "0.3.4" +dependencies = [ + "dora-node-api", + "eyre", +] + +[[package]] +name = "naga" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50e3524642f53d9af419ab5e8dd29d3ba155708267667c2f3f06c88c9e130843" +dependencies = [ + "bit-set", + "bitflags 2.5.0", + "codespan-reporting", + "hexf-parse", + "indexmap 2.2.6", + "log", + "num-traits", + "rustc-hash", + "spirv", + "termcolor", + "thiserror", + "unicode-xid", +] + +[[package]] +name = "names" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bddcd3bf5144b6392de80e04c347cd7fab2508f6df16a85fc496ecd5cec39bc" +dependencies = [ + "clap 3.2.25", + "rand", +] + +[[package]] +name = "nanorand" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" +dependencies = [ + "getrandom", +] + +[[package]] +name = "natord" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "308d96db8debc727c3fd9744aac51751243420e46edf401010908da7f8d5e57c" + +[[package]] +name = "ndarray" +version = "0.15.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb12d4e967ec485a5f71c6311fe28158e9d6f4bc4a447b474184d0f91a8fa32" +dependencies = [ + "matrixmultiply", + "num-complex", + "num-integer", + "num-traits", + "rawpointer", +] + +[[package]] +name = "ndk" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2076a31b7010b17a38c01907c45b945e8f11495ee4dd588309718901b1f7a5b7" +dependencies = [ + "bitflags 2.5.0", + "jni-sys", + "log", + "ndk-sys", + "num_enum", + "raw-window-handle 0.6.2", + "thiserror", +] + +[[package]] +name = "ndk-context" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b" + +[[package]] +name = "ndk-sys" +version = "0.5.0+25.2.9519653" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c196769dd60fd4f363e11d948139556a344e79d451aeb2fa2fd040738ef7691" +dependencies = [ + "jni-sys", +] + +[[package]] +name = "neli" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1100229e06604150b3becd61a4965d5c70f3be1759544ea7274166f4be41ef43" +dependencies = [ + "byteorder", + "libc", + "log", + "neli-proc-macros", +] + +[[package]] +name = "neli-proc-macros" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4" +dependencies = [ + "either", + "proc-macro2", + "quote", + "serde", + "syn 1.0.109", +] + 
+[[package]] +name = "net2" +version = "0.2.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b13b648036a2339d06de780866fbdfda0dde886de7b3af2ddeba8b14f4ee34ac" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "never" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c96aba5aa877601bb3f6dd6a63a969e1f82e60646e81e71b14496995e9853c91" + +[[package]] +name = "newline-converter" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f71d09d5c87634207f894c6b31b6a2b2c64ea3bdcf71bd5599fdbbe1600c00f" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "nix" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c" +dependencies = [ + "bitflags 1.3.2", + "cc", + "cfg-if 1.0.0", + "libc", + "memoffset 0.6.5", +] + +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if 1.0.0", + "libc", + "memoffset 0.7.1", + "pin-utils", +] + +[[package]] +name = "nix" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +dependencies = [ + "bitflags 2.5.0", + "cfg-if 1.0.0", + "cfg_aliases 0.1.1", + "libc", +] + +[[package]] +name = "no-std-net" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" + +[[package]] +name = "nohash-hasher" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "notify" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "729f63e1ca555a43fe3efa4f3efdf4801c479da85b432242a7b726f353c88486" +dependencies = [ + "bitflags 1.3.2", + "crossbeam-channel", + "filetime", + "fsevent-sys", + "inotify", + "kqueue", + "libc", + "mio 0.8.11", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "notify" +version = "6.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d" +dependencies = [ + "bitflags 2.5.0", + "crossbeam-channel", + "filetime", + "fsevent-sys", + "inotify", + "kqueue", + "libc", + "log", + "mio 0.8.11", + "walkdir", + "windows-sys 0.48.0", +] + +[[package]] +name = "ntapi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi 0.3.9", +] + +[[package]] +name = "nu-ansi-term" +version = "0.49.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c073d3c1930d0751774acf49e66653acecb416c3a54c6ec095a9b11caddb5a68" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand", + "smallvec", + "zeroize", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi 0.3.9", + "libc", +] + +[[package]] +name = "num_enum" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" +dependencies = [ + 
"proc-macro-crate 3.1.0", + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "num_threads" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" +dependencies = [ + "libc", +] + +[[package]] +name = "nvml-wrapper" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd21b9f5a1cce3c3515c9ffa85f5c7443e07162dae0ccf4339bb7ca38ad3454" +dependencies = [ + "bitflags 1.3.2", + "libloading 0.7.4", + "nvml-wrapper-sys", + "static_assertions", + "thiserror", + "wrapcenum-derive", +] + +[[package]] +name = "nvml-wrapper-sys" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c961a2ea9e91c59a69b78e69090f6f5b867bb46c0c56de9482da232437c4987e" +dependencies = [ + "libloading 0.7.4", +] + +[[package]] +name = "objc" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" +dependencies = [ + "malloc_buf", + "objc_exception", +] + +[[package]] +name = "objc-foundation" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1add1b659e36c9607c7aab864a76c7a4c2760cd0cd2e120f3fb8b952c7e22bf9" +dependencies = [ + "block", + "objc", + "objc_id", +] + +[[package]] +name = "objc-sys" +version = "0.2.0-beta.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b9834c1e95694a05a828b59f55fa2afec6288359cda67146126b3f90a55d7" + +[[package]] +name = "objc-sys" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb91bdd390c7ce1a8607f35f3ca7151b65afc0ff5ff3b34fa350f7d7c7e4310" + +[[package]] +name = "objc2" +version = "0.3.0-beta.3.patch-leaks.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e01640f9f2cb1220bbe80325e179e532cb3379ebcd1bf2279d703c19fe3a468" +dependencies = [ + "block2 0.2.0-alpha.6", + "objc-sys 0.2.0-beta.2", + "objc2-encode 2.0.0-pre.2", +] + +[[package]] +name = "objc2" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "559c5a40fdd30eb5e344fbceacf7595a81e242529fb4e21cf5f43fb4f11ff98d" +dependencies = [ + "objc-sys 0.3.5", + "objc2-encode 3.0.0", +] + +[[package]] +name = "objc2" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46a785d4eeff09c14c487497c162e92766fbb3e4059a71840cecc03d9a50b804" +dependencies = [ + "objc-sys 0.3.5", + "objc2-encode 4.0.3", +] + +[[package]] +name = "objc2-app-kit" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4e89ad9e3d7d297152b17d39ed92cd50ca8063a89a9fa569046d41568891eff" +dependencies = [ + "bitflags 2.5.0", + "block2 0.5.1", + "libc", + "objc2 0.5.2", + "objc2-core-data", + "objc2-core-image", + "objc2-foundation", + "objc2-quartz-core", +] + +[[package]] +name = "objc2-core-data" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "617fbf49e071c178c0b24c080767db52958f716d9eabdf0890523aeae54773ef" +dependencies = [ + "bitflags 2.5.0", + "block2 0.5.1", + "objc2 0.5.2", + "objc2-foundation", +] + +[[package]] +name = "objc2-core-image" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55260963a527c99f1819c4f8e3b47fe04f9650694ef348ffd2227e8196d34c80" +dependencies 
= [ + "block2 0.5.1", + "objc2 0.5.2", + "objc2-foundation", + "objc2-metal", +] + +[[package]] +name = "objc2-encode" +version = "2.0.0-pre.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abfcac41015b00a120608fdaa6938c44cb983fee294351cc4bac7638b4e50512" +dependencies = [ + "objc-sys 0.2.0-beta.2", +] + +[[package]] +name = "objc2-encode" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d079845b37af429bfe5dfa76e6d087d788031045b25cfc6fd898486fd9847666" + +[[package]] +name = "objc2-encode" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7891e71393cd1f227313c9379a26a584ff3d7e6e7159e988851f0934c993f0f8" + +[[package]] +name = "objc2-foundation" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ee638a5da3799329310ad4cfa62fbf045d5f56e3ef5ba4149e7452dcf89d5a8" +dependencies = [ + "bitflags 2.5.0", + "block2 0.5.1", + "libc", + "objc2 0.5.2", +] + +[[package]] +name = "objc2-metal" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0cba1276f6023976a406a14ffa85e1fdd19df6b0f737b063b95f6c8c7aadd6" +dependencies = [ + "bitflags 2.5.0", + "block2 0.5.1", + "objc2 0.5.2", + "objc2-foundation", +] + +[[package]] +name = "objc2-quartz-core" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e42bee7bff906b14b167da2bac5efe6b6a07e6f7c0a21a7308d40c960242dc7a" +dependencies = [ + "bitflags 2.5.0", + "block2 0.5.1", + "objc2 0.5.2", + "objc2-foundation", + "objc2-metal", +] + +[[package]] +name = "objc_exception" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad970fb455818ad6cba4c122ad012fae53ae8b4795f86378bce65e4f6bab2ca4" +dependencies = [ + "cc", +] + +[[package]] +name = "objc_id" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c92d4ddb4bd7b50d730c215ff871754d0da6b2178849f8a2a2ab69712d0c073b" +dependencies = [ + "objc", +] + +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "onig" +version = "6.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c4b31c8722ad9171c6d77d3557db078cab2bd50afcc9d09c8b315c59df8ca4f" +dependencies = [ + "bitflags 1.3.2", + "libc", + "once_cell", + "onig_sys", +] + +[[package]] +name = "onig_sys" +version = "69.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b829e3d7e9cc74c7e315ee8edb185bf4190da5acde74afd7fc59c35b1f086e7" +dependencies = [ + "cc", + "pkg-config", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "opentelemetry" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69d6c3d7288a106c0a363e4b0e8d308058d56902adefb16f4936f417ffef086e" +dependencies = [ + "opentelemetry_api", + "opentelemetry_sdk 0.18.0", +] + 
+[[package]] +name = "opentelemetry" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900d57987be3f2aeb70d385fff9b27fb74c5723cc9a52d904d4f9c807a0667bf" +dependencies = [ + "futures-core", + "futures-sink", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", + "urlencoding", +] + +[[package]] +name = "opentelemetry-jaeger" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e785d273968748578931e4dc3b4f5ec86b26e09d9e0d66b55adda7fce742f7a" +dependencies = [ + "async-trait", + "futures", + "futures-executor", + "once_cell", + "opentelemetry 0.18.0", + "opentelemetry-semantic-conventions 0.10.0", + "thiserror", + "thrift 0.16.0", + "tokio", +] + +[[package]] +name = "opentelemetry-otlp" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a016b8d9495c639af2145ac22387dcb88e44118e45320d9238fbf4e7889abcb" +dependencies = [ + "async-trait", + "futures-core", + "http 0.2.12", + "opentelemetry 0.22.0", + "opentelemetry-proto", + "opentelemetry-semantic-conventions 0.14.0", + "opentelemetry_sdk 0.22.1", + "prost", + "thiserror", + "tokio", + "tonic", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a8fddc9b68f5b80dae9d6f510b88e02396f006ad48cac349411fbecc80caae4" +dependencies = [ + "opentelemetry 0.22.0", + "opentelemetry_sdk 0.22.1", + "prost", + "tonic", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b02e0230abb0ab6636d18e2ba8fa02903ea63772281340ccac18e0af3ec9eeb" +dependencies = [ + "opentelemetry 0.18.0", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9ab5bd6c42fb9349dcf28af2ba9a0667f697f9bdcca045d39f2cec5543e2910" + +[[package]] +name = "opentelemetry-system-metrics" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dca748d4fe59e208f6c71bde86573d326f98ed29696d31dbf7d2454b8f1af2d" +dependencies = [ + "eyre", + "indexmap 1.9.3", + "nvml-wrapper", + "opentelemetry 0.22.0", + "sysinfo 0.29.11", + "tracing", +] + +[[package]] +name = "opentelemetry_api" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22" +dependencies = [ + "fnv", + "futures-channel", + "futures-util", + "indexmap 1.9.3", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113" +dependencies = [ + "async-trait", + "crossbeam-channel", + "dashmap", + "fnv", + "futures-channel", + "futures-executor", + "futures-util", + "once_cell", + "opentelemetry_api", + "percent-encoding", + "rand", + "thiserror", + "tokio", + "tokio-stream", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e90c7113be649e31e9a0f8b5ee24ed7a16923b322c3c5ab6367469c049d6b7e" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "glob", + "once_cell", + 
"opentelemetry 0.22.0", + "ordered-float 4.2.0", + "percent-encoding", + "rand", + "thiserror", + "tokio", + "tokio-stream", +] + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "orbclient" +version = "0.3.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52f0d54bde9774d3a51dcf281a5def240c71996bc6ca05d2c847ec8b2b216166" +dependencies = [ + "libredox 0.0.2", +] + +[[package]] +name = "ordered-float" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" +dependencies = [ + "num-traits", +] + +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "ordered-float" +version = "3.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1e1c390732d15f1d48471625cd92d154e66db2c56645e29a9cd26f4699f72dc" +dependencies = [ + "num-traits", +] + +[[package]] +name = "ordered-float" +version = "4.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e" +dependencies = [ + "num-traits", +] + +[[package]] +name = "ordered-stream" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aa2b01e1d916879f73a53d01d1d6cee68adbb31d6d9177a8cfce093cced1d50" +dependencies = [ + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "os_str_bytes" +version = "6.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "owned_ttf_parser" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b41438d2fc63c46c74a2203bf5ccd82c41ba04347b2fcf5754f230b167067d5" +dependencies = [ + "ttf-parser", +] + +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + +[[package]] +name = "parking_lot" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall 0.5.1", + "smallvec", + "windows-targets 0.52.5", +] + +[[package]] +name = "parquet" +version = "52.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c3b5322cc1bbf67f11c079c42be41a55949099b78732f7dba9e15edde40eab" +dependencies = [ + "ahash", + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-ipc", + "arrow-schema", + 
"arrow-select", + "base64 0.22.1", + "brotli", + "bytes", + "chrono", + "flate2", + "futures", + "half", + "hashbrown 0.14.5", + "lz4_flex", + "num", + "num-bigint", + "paste", + "seq-macro", + "snap", + "thrift 0.17.0", + "tokio", + "twox-hash", + "zstd", + "zstd-sys", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "path_abs" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ef02f6342ac01d8a93b65f96db53fe68a92a15f41144f97fb00a9e669633c3" +dependencies = [ + "std_prelude", +] + +[[package]] +name = "pathdiff" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" + +[[package]] +name = "peg" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f76678828272f177ac33b7e2ac2e3e73cc6c1cd1e3e387928aa69562fa51367" +dependencies = [ + "peg-macros", + "peg-runtime", +] + +[[package]] +name = "peg-macros" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "636d60acf97633e48d266d7415a9355d4389cea327a193f87df395d88cd2b14d" +dependencies = [ + "peg-runtime", + "proc-macro2", + "quote", +] + +[[package]] +name = "peg-runtime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9555b1514d2d99d78150d3c799d4c357a3e2c2a8062cd108e93a06d9057629c5" + +[[package]] +name = "pem-rfc7468" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" +dependencies = [ + "base64ct", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pest" +version = "2.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "pest_meta" +version = "2.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" +dependencies = [ + "once_cell", + "pest", + "sha2", +] + +[[package]] +name = "petgraph" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +dependencies = [ + "fixedbitset", + "indexmap 2.2.6", +] + +[[package]] +name = "pin-project" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "piper" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "464db0c665917b13ebb5d453ccdec4add5658ee1adc7affc7677615356a8afaf" +dependencies = [ + "atomic-waker", + "fastrand 2.1.0", + "futures-io", +] + +[[package]] +name = "pkcs1" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eff33bdbdfc54cc98a2eca766ebdec3e1b8fb7387523d5c9c9a2891da856f719" +dependencies = [ + "der", + "pkcs8", + "spki", + "zeroize", +] + +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" + +[[package]] +name = "planus" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1691dd09e82f428ce8d6310bd6d5da2557c82ff17694d2a32cad7242aea89f" +dependencies = [ + "array-init-cursor", +] + +[[package]] +name = "plist" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9d34169e64b3c7a80c8621a48adaf44e0cf62c78a9b25dd9dd35f1881a17cf9" +dependencies = [ + "base64 0.21.7", + "indexmap 2.2.6", + "line-wrap", + "quick-xml", + "serde", + "time", +] + +[[package]] +name = "ply-rs" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbadf9cb4a79d516de4c64806fe64ffbd8161d1ac685d000be789fb628b88963" +dependencies = [ + "byteorder", + "linked-hash-map", + "peg", + "skeptic", +] + +[[package]] +name = "pnet" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0caaf5b11fd907ff15cf14a4477bfabca4b37ab9e447a4f8dead969a59cdafad" +dependencies = [ + "pnet_base", + "pnet_datalink", + "pnet_packet", + "pnet_transport", +] + +[[package]] +name = "pnet_base" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d3a993d49e5fd5d4d854d6999d4addca1f72d86c65adf224a36757161c02b6" +dependencies = [ + "no-std-net", +] + +[[package]] +name = "pnet_datalink" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e466faf03a98ad27f6e15cd27a2b7cc89e73e640a43527742977bc503c37f8aa" +dependencies = [ + "ipnetwork", + "libc", + "pnet_base", + "pnet_sys", + "winapi 0.3.9", +] + +[[package]] +name = "pnet_macros" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"48dd52a5211fac27e7acb14cfc9f30ae16ae0e956b7b779c8214c74559cef4c3" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 1.0.109", +] + +[[package]] +name = "pnet_macros_support" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89de095dc7739349559913aed1ef6a11e73ceade4897dadc77c5e09de6740750" +dependencies = [ + "pnet_base", +] + +[[package]] +name = "pnet_packet" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc3b5111e697c39c8b9795b9fdccbc301ab696699e88b9ea5a4e4628978f495f" +dependencies = [ + "glob", + "pnet_base", + "pnet_macros", + "pnet_macros_support", +] + +[[package]] +name = "pnet_sys" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "328e231f0add6d247d82421bf3790b4b33b39c8930637f428eef24c4c6a90805" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "pnet_transport" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff597185e6f1f5671b3122e4dba892a1c73e17c17e723d7669bd9299cbe7f124" +dependencies = [ + "libc", + "pnet_base", + "pnet_packet", + "pnet_sys", +] + +[[package]] +name = "png" +version = "0.17.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06e4b0d3d1312775e782c86c91a111aa1f910cbb65e1337f9975b5f9a554b5e1" +dependencies = [ + "bitflags 1.3.2", + "crc32fast", + "fdeflate", + "flate2", + "miniz_oxide", +] + +[[package]] +name = "poll-promise" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6a58fecbf9da8965bcdb20ce4fd29788d1acee68ddbb64f0ba1b81bccdb7df" +dependencies = [ + "document-features", + "static_assertions", + "wasm-bindgen", + "wasm-bindgen-futures", +] + +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if 1.0.0", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645493cf344456ef24219d02a768cf1fb92ddf8c92161679ae3d91b91a637be3" +dependencies = [ + "cfg-if 1.0.0", + "concurrent-queue", + "hermit-abi 0.3.9", + "pin-project-lite", + "rustix 0.38.34", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "pollster" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22686f4785f02a4fcc856d3b3bb19bf6c8160d103f7a99cc258bddd0251dc7f2" + +[[package]] +name = "portable-atomic" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "presser" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8cf8e6a8aa66ce33f63993ffc4ea4271eb5b0530a9002db8455ea6050c77bfa" + +[[package]] +name = 
"prettyplease" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +dependencies = [ + "proc-macro2", + "syn 1.0.109", +] + +[[package]] +name = "prettyplease" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +dependencies = [ + "proc-macro2", + "syn 2.0.65", +] + +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +dependencies = [ + "once_cell", + "toml_edit 0.19.15", +] + +[[package]] +name = "proc-macro-crate" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +dependencies = [ + "toml_edit 0.21.1", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "profiling" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d84d1d7a6ac92673717f9f6d1518374ef257669c24ebc5ac25d5033828be58" +dependencies = [ + "profiling-procmacros", + "puffin", +] + +[[package]] +name = "profiling-procmacros" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8021cf59c8ec9c432cfc2526ac6b8aa508ecaf29cd415f271b8406c1b851c3fd" +dependencies = [ + "quote", + "syn 2.0.65", +] + +[[package]] +name = "prost" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +dependencies = [ + "anyhow", + "itertools 0.12.1", + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "puffin" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9f76ad4bb049fded4e572df72cbb6381ff5d1f41f85c3a04b56e4eca287a02f" +dependencies = [ + "anyhow", + "bincode", + "byteorder", + "cfg-if 1.0.0", + "lz4_flex", + "once_cell", + "parking_lot", + "serde", +] + +[[package]] +name = "puffin_http" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4936c085e48efc86f6d96609dc5086d1d236afe3ec4676f09b157a4f4be83ff6" +dependencies = [ + "anyhow", + "crossbeam-channel", + "log", + "parking_lot", + "puffin", +] + +[[package]] 
+name = "pulldown-cmark" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" +dependencies = [ + "bitflags 2.5.0", + "memchr", + "unicase", +] + +[[package]] +name = "pulldown-cmark" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76979bea66e7875e7509c4ec5300112b316af87fa7a252ca91c448b32dfe3993" +dependencies = [ + "bitflags 2.5.0", + "memchr", + "unicase", +] + +[[package]] +name = "pyo3" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e00b96a521718e08e03b1a622f01c8a8deb50719335de3f60b3b3950f069d8" +dependencies = [ + "cfg-if 1.0.0", + "eyre", + "indoc", + "libc", + "memoffset 0.9.1", + "parking_lot", + "portable-atomic", + "pyo3-build-config", + "pyo3-ffi", + "pyo3-macros", + "serde", + "unindent", +] + +[[package]] +name = "pyo3-build-config" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7883df5835fafdad87c0d888b266c8ec0f4c9ca48a5bed6bbb592e8dedee1b50" +dependencies = [ + "once_cell", + "target-lexicon", +] + +[[package]] +name = "pyo3-ffi" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01be5843dc60b916ab4dad1dca6d20b9b4e6ddc8e15f50c47fe6d85f1fb97403" +dependencies = [ + "libc", + "pyo3-build-config", +] + +[[package]] +name = "pyo3-macros" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77b34069fc0682e11b31dbd10321cbf94808394c56fd996796ce45217dfac53c" +dependencies = [ + "proc-macro2", + "pyo3-macros-backend", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "pyo3-macros-backend" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08260721f32db5e1a5beae69a55553f56b99bd0e1c3e6e0a5e8851a9d0f5a85c" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "pyo3-build-config", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "pythonize" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d0664248812c38cc55a4ed07f88e4df516ce82604b93b1ffdc041aa77a6cb3c" +dependencies = [ + "pyo3", + "serde", +] + +[[package]] +name = "quick-xml" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1004a344b30a54e2ee58d66a71b32d2db2feb0a31f9a2d302bf0536f15de2a33" +dependencies = [ + "memchr", +] + +[[package]] +name = "quinn" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e8b432585672228923edbbf64b8b12c14e1112f62e88737655b4a083dbcd78e" +dependencies = [ + "bytes", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls 0.20.9", + "thiserror", + "tokio", + "tracing", + "webpki", +] + +[[package]] +name = "quinn-proto" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94b0b33c13a79f669c85defaf4c275dc86a0c0372807d0ca3d78e0bb87274863" +dependencies = [ + "bytes", + "rand", + "ring 0.16.20", + "rustc-hash", + "rustls 0.20.9", + "rustls-native-certs", + "slab", + "thiserror", + "tinyvec", + "tracing", + "webpki", +] + +[[package]] +name = "quinn-udp" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "641538578b21f5e5c8ea733b736895576d0fe329bb883b937db6f4d163dbaaf4" +dependencies = [ + "libc", + "quinn-proto", + "socket2 0.4.10", + "tracing", 
+ "windows-sys 0.42.0", +] + +[[package]] +name = "quote" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "raw-window-handle" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2ff9a1f06a88b01621b7ae906ef0211290d1c8a168a15542486a8f61c0833b9" + +[[package]] +name = "raw-window-handle" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20675572f6f24e9e76ef639bc5552774ed45f1c30e2951e1e99c59888861c539" + +[[package]] +name = "raw_sync_2" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f067b45fa17e31d15636789c2638bd562da5496d498876cf0495df78f7e4fdcb" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "nix 0.23.2", + "rand", + "winapi 0.3.9", +] + +[[package]] +name = "rawpointer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "re_analytics" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28ed083fc621e7e7d9b9a17dbb4986a5329a05ac1ff52692e77826820eac4de9" +dependencies = [ + "crossbeam", + "directories-next", + "ehttp", + "re_build_info", + "re_build_tools", + "re_log", + "serde", + "serde_json", + "sha2", + "thiserror", + "time", + "uuid", + "web-sys", +] + +[[package]] +name = "re_arrow2" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1285f33f03e2faf9f77b06c19f32f8c54792a4cbb19df762b9ea70b79e0773d" +dependencies = [ + "ahash", + "arrow-format", + "bytemuck", + "chrono", + "comfy-table", + "dyn-clone", + "either", + "ethnum", + "foreign_vec", + "getrandom", + "hash_hasher", + "hashbrown 0.14.5", + "num-traits", + "rustc_version", + "simdutf8", +] + +[[package]] +name = "re_build_info" +version = "0.15.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "147f2e79d9d8ef833666413adbc795403e250cfd8f29f41ede198a5a4dde8612" + +[[package]] +name = "re_build_tools" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88dd6c26a72f68dd437dcbb462dc4fd5b6ab439f41f2dce2a12b95300266f725" +dependencies = [ + "anyhow", + "cargo_metadata 0.18.1", + "glob", + "sha2", + "time", + "unindent", + "walkdir", +] + +[[package]] +name = "re_crash_handler" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3feb098f6d9840f771f00b9c753f45eaa39687c5eac23549dbfe878a62c7d53d" +dependencies = [ + "backtrace", + "itertools 0.12.1", + "libc", + "parking_lot", + "re_analytics", + "re_build_info", +] + +[[package]] +name = "re_data_source" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa09ab2bd8dfb4d3a0c9541adf1ddf1eb880812a8d2e24875533bfce644db469" +dependencies = [ + "ahash", + "anyhow", + "image 0.24.9", + "itertools 0.12.1", + "once_cell", + "parking_lot", + "rayon", + "re_build_tools", + "re_log", + "re_log_encoding", + "re_log_types", + "re_smart_channel", + "re_tracing", + "re_types", + "re_ws_comms", + "thiserror", + "walkdir", +] + +[[package]] +name = "re_data_store" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8faaee8c6f3c9aedf3b1b6c22050661861e41a8840f664d7685f6875667c9ba0" +dependencies = [ + "ahash", + "document-features", + "indent", + "itertools 0.12.1", + "nohash-hasher", + "once_cell", + "parking_lot", + "re_arrow2", + "re_error", + "re_format", + "re_log", + "re_log_types", + "re_tracing", + "re_types_core", + "smallvec", + "thiserror", + "web-time", +] + +[[package]] +name = "re_data_ui" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fde87a2c87ca648bdcce352cf086f7536679c53a7583f2d27c299363fbaf231" +dependencies = [ + "ahash", + "anyhow", + "bytemuck", + "egui", + "egui_extras", + "egui_plot", + "image 0.24.9", + "itertools 0.12.1", + "re_data_store", + "re_entity_db", + "re_error", + "re_format", + "re_log", + "re_log_types", + "re_query", + "re_renderer", + "re_smart_channel", + "re_tracing", + "re_types", + "re_types_core", + "re_ui", + "re_viewer_context", + "rfd", +] + +[[package]] +name = "re_entity_db" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14f8d79b407fc1068f92994bcbf8681f1f6087547bf72ef6491f824a6965a0b2" +dependencies = [ + "ahash", + "document-features", + "emath", + "getrandom", + "itertools 0.12.1", + "nohash-hasher", + "parking_lot", + "re_data_store", + "re_format", + "re_int_histogram", + "re_log", + "re_log_encoding", + "re_log_types", + "re_query", + "re_query_cache", + "re_smart_channel", + "re_tracing", + "re_types_core", + "rmp-serde", + "serde", + "thiserror", + "web-time", +] + +[[package]] +name = "re_error" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a561ff5a4cdf5bc37c03f62fd78f618326c4cb0b10d2d7a062b33490bf9d630" + +[[package]] +name = "re_format" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4405cf9203a791ff5e23bcbd54718635f9ec2323d48ea7d2c312adb6af756ca4" +dependencies = [ + "comfy-table", + "num-traits", + "re_arrow2", + "re_tuid", + "re_types_core", +] + +[[package]] +name = "re_int_histogram" +version = "0.15.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d422a395b4c0e0aed3acaca56a2b5681fc0a89d2a0d58323c2b940ae5944ac2f" +dependencies = [ + "smallvec", + "static_assertions", +] + +[[package]] +name = "re_log" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35056426bb497a2bd30ec54f1aac35f32c6b0f2cd64fd833509ebb1e4be19f11" +dependencies = [ + "env_logger", + "js-sys", + "log", + "log-once", + "parking_lot", + "tracing", + "wasm-bindgen", +] + +[[package]] +name = "re_log_encoding" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10ff455e7e5e091cf8e43ce397c03ebad81357e3f45eab9a076674e7f2002618" +dependencies = [ + "ehttp", + "js-sys", + "lz4_flex", + "parking_lot", + "re_build_info", + "re_log", + "re_log_types", + "re_smart_channel", + "re_tracing", + "rmp-serde", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "web-time", +] + +[[package]] +name = "re_log_types" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "239e3176efb51de961a8ea4d27af8b8076c37e40f8788b5a87e060fb806d7a29" +dependencies = [ + "ahash", + "anyhow", + "backtrace", + "bytemuck", + "clean-path", + "crossbeam", + "document-features", + "fixed", + "half", + "itertools 0.12.1", + "natord", + "nohash-hasher", + "num-derive", + "num-traits", + "re_arrow2", + "re_format", + "re_log", + "re_string_interner", + "re_tracing", + "re_tuid", + "re_types_core", + "serde", + "serde_bytes", + "similar-asserts", + "smallvec", + "thiserror", + "time", + "typenum", + "uuid", + "web-time", +] + +[[package]] +name = "re_memory" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6812a6e08580fd7c6864eb018a8535093c23ab28cc4a607fe20b65b991dc1a36" +dependencies = [ + "ahash", + "backtrace", + "emath", + "itertools 0.12.1", + "memory-stats", + "nohash-hasher", + "once_cell", + "parking_lot", + "re_format", + "re_log", + "re_tracing", + "smallvec", + "sysinfo 0.30.12", + "wasm-bindgen", + "web-time", +] + +[[package]] +name = "re_query" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64be284a20b1756e11702fbb6b9021ea9f291f6541dcf0def23dda8da803055c" +dependencies = [ + "backtrace", + "document-features", + "itertools 0.12.1", + "re_arrow2", + "re_data_store", + "re_format", + "re_log", + "re_log_types", + "re_tracing", + "re_types_core", + "serde", + "smallvec", + "thiserror", +] + +[[package]] +name = "re_query_cache" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2e00e32ea25bd8cc8393a184b5c0b135fac7cf86aad131d818f5973dd0ccbf" +dependencies = [ + "ahash", + "indent", + "itertools 0.12.1", + "parking_lot", + "paste", + "re_data_store", + "re_format", + "re_log", + "re_log_types", + "re_query", + "re_tracing", + "re_types_core", + "seq-macro", + "web-time", +] + +[[package]] +name = "re_renderer" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2902018a22e9cb535737582bc2c26e8e4939d0b24a5cd404c61919739e1f6d61" +dependencies = [ + "ahash", + "anyhow", + "bitflags 2.5.0", + "bytemuck", + "cfg-if 1.0.0", + "cfg_aliases 0.2.1", + "clean-path", + "crossbeam", + "document-features", + "ecolor", + "enumset", + "getrandom", + "glam", + "gltf", + "half", + "itertools 0.12.1", + "macaw", + "never", + "notify 6.1.1", + "ordered-float 4.2.0", + "parking_lot", + "pathdiff", + 
"profiling", + "re_arrow2", + "re_build_tools", + "re_error", + "re_log", + "re_tracing", + "serde", + "slotmap", + "smallvec", + "static_assertions", + "thiserror", + "tinystl", + "tobj", + "type-map", + "walkdir", + "wasm-bindgen-futures", + "wgpu", + "wgpu-core", +] + +[[package]] +name = "re_sdk" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45192dc7062d954626534e46709c3998a9c0773ffc48fad16d0ab2cd0278a895" +dependencies = [ + "ahash", + "anyhow", + "crossbeam", + "document-features", + "itertools 0.12.1", + "once_cell", + "parking_lot", + "re_build_info", + "re_build_tools", + "re_data_source", + "re_log", + "re_log_encoding", + "re_log_types", + "re_memory", + "re_sdk_comms", + "re_smart_channel", + "re_types_core", + "re_web_viewer_server", + "re_ws_comms", + "thiserror", + "webbrowser", +] + +[[package]] +name = "re_sdk_comms" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fe674910694817af5ff3efc4b36f19c7ce9e01f797e9d53b450e094eef3067a" +dependencies = [ + "ahash", + "crossbeam", + "document-features", + "rand", + "re_log", + "re_log_encoding", + "re_log_types", + "re_smart_channel", + "thiserror", + "tokio", +] + +[[package]] +name = "re_smart_channel" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "473934eb487c8f439cbde1e565c7a57e5a37909092c93d6a100efa1a8d52f5d6" +dependencies = [ + "crossbeam", + "parking_lot", + "re_tracing", + "serde", + "web-time", +] + +[[package]] +name = "re_space_view" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88a75158bba7b189228da2ca2c6801a74c42290a0120af78b93ec30035edafb3" +dependencies = [ + "ahash", + "egui", + "itertools 0.12.1", + "nohash-hasher", + "re_data_store", + "re_entity_db", + "re_log", + "re_log_types", + "re_query", + "re_tracing", + "re_types", + "re_types_core", + "re_viewer_context", + "slotmap", + "smallvec", +] + +[[package]] +name = "re_space_view_bar_chart" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ff7920522519199c365b5e049fe46c6dde0c4c1b356318239c0409c00e04495" +dependencies = [ + "egui", + "egui_plot", + "re_data_store", + "re_entity_db", + "re_log", + "re_log_types", + "re_renderer", + "re_space_view", + "re_tracing", + "re_types", + "re_ui", + "re_viewer_context", +] + +[[package]] +name = "re_space_view_dataframe" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c09039219aae6bc2c6a1cb9e98c36f59858006ee639501b5c190f9e9d7fe7b" +dependencies = [ + "egui", + "egui_extras", + "re_data_store", + "re_data_ui", + "re_entity_db", + "re_log_types", + "re_query", + "re_renderer", + "re_tracing", + "re_types", + "re_ui", + "re_viewer_context", +] + +[[package]] +name = "re_space_view_spatial" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee0c742b6de79ca773bed2aa9255c2f0cad0a4b43181fed025d43e52a5885b73" +dependencies = [ + "ahash", + "anyhow", + "bitflags 2.5.0", + "bytemuck", + "egui", + "glam", + "itertools 0.12.1", + "macaw", + "nohash-hasher", + "once_cell", + "parking_lot", + "rayon", + "re_data_store", + "re_data_ui", + "re_entity_db", + "re_error", + "re_format", + "re_log", + "re_log_types", + "re_query", + "re_query_cache", + "re_renderer", + "re_space_view", + "re_tracing", + "re_types", + "re_ui", + "re_viewer_context", + "serde", + "smallvec", + "web-time", 
+] + +[[package]] +name = "re_space_view_tensor" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48ad9ea32a7ba75fa07ce1346450ce78dbe8b855086dd248d13d44f5cdfa24fc" +dependencies = [ + "ahash", + "anyhow", + "bytemuck", + "egui", + "half", + "ndarray", + "re_data_store", + "re_data_ui", + "re_entity_db", + "re_log", + "re_log_types", + "re_renderer", + "re_space_view", + "re_tracing", + "re_types", + "re_ui", + "re_viewer_context", + "serde", + "thiserror", + "wgpu", +] + +[[package]] +name = "re_space_view_text_document" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea29a1471fccfb28c79bf5b745655c0c3b0b14285a20dd61f29812ee54914cb0" +dependencies = [ + "egui", + "egui_commonmark", + "itertools 0.12.1", + "re_data_store", + "re_log", + "re_query", + "re_renderer", + "re_space_view", + "re_tracing", + "re_types", + "re_ui", + "re_viewer_context", +] + +[[package]] +name = "re_space_view_text_log" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b72e7a3ec29386a91336cf60859d9668dad072ef4b14317dbf3c0bf86aec53" +dependencies = [ + "egui", + "egui_extras", + "itertools 0.12.1", + "re_data_store", + "re_data_ui", + "re_entity_db", + "re_log", + "re_log_types", + "re_query_cache", + "re_renderer", + "re_tracing", + "re_types", + "re_ui", + "re_viewer_context", +] + +[[package]] +name = "re_space_view_time_series" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8d5194c716fed198893a3712afd4b71c05e914e9b26b9e2a54ed031b4467502" +dependencies = [ + "egui", + "egui_plot", + "itertools 0.12.1", + "parking_lot", + "rayon", + "re_data_store", + "re_format", + "re_log", + "re_log_types", + "re_query", + "re_query_cache", + "re_renderer", + "re_space_view", + "re_tracing", + "re_types", + "re_ui", + "re_viewer_context", +] + +[[package]] +name = "re_string_interner" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbaf2000f39f12a01789ab13f5515028784e62be80006f9a3bdc92283ba9eff4" +dependencies = [ + "ahash", + "nohash-hasher", + "once_cell", + "parking_lot", + "serde", + "static_assertions", +] + +[[package]] +name = "re_time_panel" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a9a4e9831bc379ba8e9cc07987fe63a25100eea2f918f0f4da65d485631809c" +dependencies = [ + "egui", + "itertools 0.12.1", + "re_data_store", + "re_data_ui", + "re_entity_db", + "re_format", + "re_log_types", + "re_tracing", + "re_ui", + "re_viewer_context", + "re_viewport", + "serde", + "vec1", +] + +[[package]] +name = "re_tracing" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61ca94b1087b377bde7e109c969b381c38d9a5a93a8265635a18aa3b341a5250" +dependencies = [ + "puffin", + "puffin_http", + "re_log", + "rfd", +] + +[[package]] +name = "re_tuid" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f82367ff940f00195fc6cc868d5e9cbec26ae1a3b7de6fa4b1c37457a42e93b5" +dependencies = [ + "document-features", + "getrandom", + "once_cell", + "serde", + "web-time", +] + +[[package]] +name = "re_types" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6acfd68909dd9720a2f9542ef0f660a33b3e7afd11c46c35f7e10101679d378e" +dependencies = [ + "anyhow", + "array-init", + "bytemuck", + "document-features", + 
"ecolor", + "egui_plot", + "glam", + "half", + "image 0.24.9", + "infer", + "itertools 0.12.1", + "linked-hash-map", + "mime_guess2", + "ndarray", + "nohash-hasher", + "once_cell", + "ply-rs", + "rayon", + "re_arrow2", + "re_build_tools", + "re_log", + "re_tracing", + "re_types_builder", + "re_types_core", + "smallvec", + "thiserror", + "uuid", + "zune-core", + "zune-jpeg", +] + +[[package]] +name = "re_types_builder" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe6bf1bb44be7f5ebed2d3f8bb46c67669d7ecbe52731ab0edd16cc2db3f02f2" +dependencies = [ + "anyhow", + "camino", + "clang-format", + "convert_case", + "flatbuffers 23.5.26", + "indent", + "itertools 0.12.1", + "prettyplease 0.2.20", + "proc-macro2", + "quote", + "rayon", + "re_arrow2", + "re_build_tools", + "re_log", + "re_tracing", + "rust-format", + "syn 2.0.65", + "tempfile", + "unindent", + "xshell", +] + +[[package]] +name = "re_types_core" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a3d0143e33bd1d91ff8754511dbd1f038e4fa7bdc3fca0ac0f8619c0f778d3" +dependencies = [ + "anyhow", + "backtrace", + "bytemuck", + "document-features", + "once_cell", + "re_arrow2", + "re_error", + "re_string_interner", + "re_tracing", + "re_tuid", + "serde", + "smallvec", + "thiserror", +] + +[[package]] +name = "re_ui" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50017df0dca14a4995a71397db0a4f2e9a4a1118aee4e79c14beabdb92413cc6" +dependencies = [ + "egui", + "egui_commonmark", + "egui_extras", + "parking_lot", + "re_entity_db", + "re_format", + "re_log_types", + "serde", + "serde_json", + "strum 0.25.0", + "strum_macros 0.25.3", + "sublime_fuzzy", +] + +[[package]] +name = "re_viewer" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6770f90562ef536eecb5cf9490c2fe125c813b7e58c816afaf3c9b026fcb80e8" +dependencies = [ + "ahash", + "anyhow", + "bytemuck", + "cfg-if 1.0.0", + "eframe", + "egui", + "egui-wgpu", + "egui_extras", + "egui_plot", + "egui_tiles", + "ehttp", + "image 0.24.9", + "itertools 0.12.1", + "js-sys", + "once_cell", + "poll-promise", + "re_analytics", + "re_build_info", + "re_build_tools", + "re_data_source", + "re_data_store", + "re_data_ui", + "re_entity_db", + "re_error", + "re_format", + "re_log", + "re_log_encoding", + "re_log_types", + "re_memory", + "re_query_cache", + "re_renderer", + "re_smart_channel", + "re_space_view", + "re_space_view_bar_chart", + "re_space_view_dataframe", + "re_space_view_spatial", + "re_space_view_tensor", + "re_space_view_text_document", + "re_space_view_text_log", + "re_space_view_time_series", + "re_time_panel", + "re_tracing", + "re_types", + "re_types_core", + "re_ui", + "re_viewer_context", + "re_viewport", + "re_ws_comms", + "rfd", + "ron", + "serde", + "serde_json", + "thiserror", + "time", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "web-time", + "wgpu", +] + +[[package]] +name = "re_viewer_context" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2de3a41feea776f865e3315f7422298a7299d5ac59068fbc5018fc9dd2865b8" +dependencies = [ + "ahash", + "anyhow", + "arboard", + "bit-vec", + "bytemuck", + "egui", + "egui-wgpu", + "egui_tiles", + "glam", + "half", + "indexmap 2.2.6", + "itertools 0.12.1", + "macaw", + "ndarray", + "nohash-hasher", + "once_cell", + "parking_lot", + "re_data_source", + "re_data_store", + "re_entity_db", + 
"re_log", + "re_log_types", + "re_query", + "re_query_cache", + "re_renderer", + "re_smart_channel", + "re_string_interner", + "re_tracing", + "re_types", + "re_ui", + "serde", + "slotmap", + "smallvec", + "thiserror", + "uuid", + "wgpu", +] + +[[package]] +name = "re_viewport" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cc8b3fd1a6eefa041a7dec6a2556f6f9a60c47107e4a3297b9ebb7fa9ae1943" +dependencies = [ + "ahash", + "array-init", + "bytemuck", + "egui", + "egui_tiles", + "glam", + "image 0.24.9", + "itertools 0.12.1", + "nohash-hasher", + "once_cell", + "rayon", + "re_arrow2", + "re_data_store", + "re_data_ui", + "re_entity_db", + "re_log", + "re_log_types", + "re_query", + "re_renderer", + "re_smart_channel", + "re_space_view", + "re_space_view_time_series", + "re_tracing", + "re_types", + "re_types_core", + "re_ui", + "re_viewer_context", + "rmp-serde", + "smallvec", +] + +[[package]] +name = "re_web_viewer_server" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d11fe92ba3e159b2c4fe3640973d00cf8143ffb8d010078068a16b58a0c48b" +dependencies = [ + "clap 4.5.6", + "document-features", + "futures-util", + "hyper 0.14.28", + "re_analytics", + "re_log", + "thiserror", + "tokio", + "webbrowser", +] + +[[package]] +name = "re_ws_comms" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2fcd476b838119c1c57973c7df3b379cc6e74e9b97a72cf9bb4488170b95d20" +dependencies = [ + "anyhow", + "bincode", + "document-features", + "ewebsock", + "futures-channel", + "futures-util", + "parking_lot", + "re_format", + "re_log", + "re_log_types", + "re_memory", + "re_smart_channel", + "re_tracing", + "thiserror", + "tokio", + "tokio-tungstenite", + "tungstenite 0.20.1", +] + +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +dependencies = [ + "bitflags 2.5.0", +] + +[[package]] +name = "redox_users" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +dependencies = [ + "getrandom", + "libredox 0.1.3", + "thiserror", +] + +[[package]] +name = "regex" +version = "1.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.3", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" + +[[package]] +name = "renderdoc-sys" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b30a45b0cd0bcca8037f3d0dc3421eaf95327a17cad11964fb8179b4fc4832" + +[[package]] +name = "reqwest" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-core", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-rustls", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls 0.22.4", + "rustls-pemfile 2.1.2", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots 0.26.1", + "winreg", +] + +[[package]] +name = "rerun" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6978682653fc699d484b92d0ee75e25b73fdf9b0e50d62b67707d6bcd7f4cc96" +dependencies = [ + "anyhow", + "document-features", + "env_logger", + "itertools 0.12.1", + "log", + "puffin", + "rayon", + "re_analytics", + "re_build_info", + "re_build_tools", + "re_crash_handler", + "re_entity_db", + "re_format", + "re_log", + "re_log_types", + "re_memory", + "re_sdk", + "re_sdk_comms", + "re_smart_channel", + "re_tracing", + "re_types", + "re_viewer", + "re_web_viewer_server", +] + +[[package]] +name = "rfd" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c9e7b57df6e8472152674607f6cc68aa14a748a3157a857a94f516e11aeacc2" +dependencies = [ + "ashpd", + "async-io 1.13.0", + "block", + "dispatch", + "futures-util", + "js-sys", + "log", + "objc", + "objc-foundation", + "objc_id", + "pollster", + "raw-window-handle 0.5.2", + "urlencoding", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "windows-sys 0.48.0", +] + +[[package]] +name = "rgb" +version = "0.8.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05aaa8004b64fd573fc9d002f4e632d51ad4f026c2b5ba95fcb6c2f32c2c47d8" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin 0.5.2", + "untrusted 0.7.1", + "web-sys", + "winapi 0.3.9", +] + +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if 1.0.0", + "getrandom", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.52.0", +] + +[[package]] +name = "ringbuffer-spsc" +version = 
"0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd1938faa63a2362ee1747afb2d10567d0fb1413b9cbd6198a8541485c4f773" +dependencies = [ + "array-init", + "cache-padded", +] + +[[package]] +name = "rmp" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "228ed7c16fa39782c3b3468e974aec2795e9089153cd08ee2e9aefb3613334c4" +dependencies = [ + "byteorder", + "num-traits", + "paste", +] + +[[package]] +name = "rmp-serde" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e599a477cf9840e92f2cde9a7189e67b42c57532749bf90aea6ec10facd4db" +dependencies = [ + "byteorder", + "rmp", + "serde", +] + +[[package]] +name = "ron" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" +dependencies = [ + "base64 0.21.7", + "bitflags 2.5.0", + "serde", + "serde_derive", +] + +[[package]] +name = "ros2-client" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958115e451bf1038e4e8508fdc46c6c1830bce22520dfd734639543ff9419660" +dependencies = [ + "async-channel 2.3.1", + "bstr 1.9.1", + "bytes", + "cdr-encoding-size", + "chrono", + "clap 4.5.6", + "futures", + "itertools 0.11.0", + "lazy_static", + "libc", + "log", + "mio 0.6.23", + "mio-extras", + "nom", + "pin-utils", + "rustdds", + "serde", + "serde_repr", + "uuid", + "widestring", +] + +[[package]] +name = "rsa" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "094052d5470cbcef561cb848a7209968c9f12dfa6d668f4bca048ac5de51099c" +dependencies = [ + "byteorder", + "digest", + "num-bigint-dig", + "num-integer", + "num-iter", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core", + "signature", + "smallvec", + "subtle", + "zeroize", +] + +[[package]] +name = "rust-dataflow-example-node" +version = "0.3.4" +dependencies = [ + "dora-node-api", + "eyre", + "futures", + "rand", + "tokio", +] + +[[package]] +name = "rust-dataflow-example-sink" +version = "0.3.4" +dependencies = [ + "dora-node-api", + "eyre", +] + +[[package]] +name = "rust-dataflow-example-status-node" +version = "0.3.4" +dependencies = [ + "dora-node-api", + "eyre", +] + +[[package]] +name = "rust-format" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60e7c00b6c3bf5e38a880eec01d7e829d12ca682079f8238a464def3c4b31627" +dependencies = [ + "prettyplease 0.1.25", + "proc-macro2", + "syn 1.0.109", +] + +[[package]] +name = "rust-ros2-dataflow-example-node" +version = "0.3.4" +dependencies = [ + "dora-node-api", + "dora-ros2-bridge", + "eyre", + "futures", + "futures-timer", + "rand", + "serde_json", + "tokio", +] + +[[package]] +name = "rust_decimal" +version = "1.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1790d1c4c0ca81211399e0e0af16333276f375209e71a37b67698a373db5b47a" +dependencies = [ + "arrayvec", + "num-traits", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "rustdds" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da8578c4897ed597565e5934994fa9d875cee3244768a3d4ec67f813b499fc7e" +dependencies = [ + "bit-vec", + "byteorder", + "bytes", + "cdr-encoding-size", + "chrono", + "enumflags2", + "futures", + "if-addrs", + "io-extras", + "local-ip-address", + "log", + "md5", + "mio 0.6.23", + "mio 0.8.11", + "mio-extras", + "num-derive", + "num-traits", + "paste", + "rand", + "serde", + "serde_repr", + "socket2 0.5.7", + "socketpair", + "speedy", + "static_assertions", + "thiserror", +] + +[[package]] +name = "rustix" +version = "0.37.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes 1.0.11", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustix" +version = "0.38.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +dependencies = [ + "bitflags 2.5.0", + "errno", + "libc", + "linux-raw-sys 0.4.14", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.20.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" +dependencies = [ + "log", + "ring 0.16.20", + "sct", + "webpki", +] + +[[package]] +name = "rustls" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +dependencies = [ + "log", + "ring 0.17.8", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile 1.0.4", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-pemfile" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +dependencies = [ + "base64 0.22.1", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" + +[[package]] +name = "rustls-webpki" +version = "0.102.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +dependencies = [ + "ring 0.17.8", + "rustls-pki-types", + "untrusted 0.9.0", +] + +[[package]] +name = "rustversion" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" + +[[package]] +name = "ryu" +version = 
"1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "safer-ffi" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44abae8773dc41fb96af52696b834b1f4c806006b456b22ee3602f7b061e3ad0" +dependencies = [ + "inventory", + "libc", + "macro_rules_attribute", + "paste", + "safer_ffi-proc_macros", + "scopeguard", + "uninit", + "unwind_safe", + "with_builtin_macros", +] + +[[package]] +name = "safer_ffi-proc_macros" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c9d4117a8a72f9b615169d4d720d79e74931f74003c73cc2f3927c700156ddf" +dependencies = [ + "macro_rules_attribute", + "prettyplease 0.1.25", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "schemars" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 2.0.65", +] + +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "scratch" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3cf7c11c38cb994f3d40e8a8cde3bbd1f72a435e4c49e85d6553d8312306152" + +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring 0.17.8", + "untrusted 0.9.0", +] + +[[package]] +name = "security-framework" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +dependencies = [ + "bitflags 2.5.0", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +dependencies = [ + "serde", +] + +[[package]] +name = "seq-macro" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" + +[[package]] +name = "serde" +version = "1.0.203" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde-big-array" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" +dependencies = [ + "serde", +] + +[[package]] +name = "serde-with-expand-env" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "888d884a3be3a209308d0b66f1918ff18f60e93db837259e53ea7d8dd14e7e98" +dependencies = [ + "serde", + "shellexpand 2.1.2", +] + +[[package]] +name = "serde_assert" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92b7be0ad5a7b2eefaa5418eb141838270f1ad2d2c6e88acec3795d2425ffa97" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_bytes" +version = "0.11.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_derive" +version = "1.0.203" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "serde_json" +version = "1.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_repr" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_yaml" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" +dependencies = [ + "indexmap 1.9.3", + "ryu", + "serde", + "yaml-rust", +] + +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap 2.2.6", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest", + "keccak", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shared-memory-server" +version = "0.3.4" +dependencies = [ + "bincode", + "eyre", + "raw_sync_2", + "serde", + "shared_memory_extended", + "tracing", +] + +[[package]] +name = "shared_memory_extended" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "004d7ece9a3be64f85471d50967710b0a146144225bed5f0abd0514a3bed086f" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "nix 0.26.4", + "rand", + "win-sys", +] + +[[package]] +name = "shell-escape" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45bb67a18fa91266cc7807181f62f9178a6873bfad7dc788c42e6430db40184f" + +[[package]] +name = "shell-words" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" + +[[package]] +name = "shellexpand" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ccc8076840c4da029af4f87e4e8daeb0fca6b87bbb02e10cb60b791450e11e4" +dependencies = [ + "dirs 4.0.0", +] + +[[package]] +name = "shellexpand" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da03fa3b94cc19e3ebfc88c4229c49d8f08cdbd1228870a45f0ffdf84988e14b" +dependencies = [ + "dirs 5.0.1", +] + +[[package]] +name = "signal-hook" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-mio" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af" +dependencies = [ + "libc", + "mio 0.8.11", + "signal-hook", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +dependencies = [ + "libc", +] + +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest", + "rand_core", +] + +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + +[[package]] +name = "simdutf8" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" + +[[package]] +name = "similar" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa42c91313f1d05da9b26f267f931cf178d4aba455b4c4622dd7355eb80c6640" +dependencies = [ + "bstr 0.2.17", + "unicode-segmentation", +] + +[[package]] +name = "similar-asserts" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e041bb827d1bfca18f213411d51b665309f1afb37a04a5d1464530e13779fc0f" +dependencies = [ + "console", + "similar", +] + +[[package]] +name = "skeptic" +version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" +dependencies = [ + "bytecount", + "cargo_metadata 0.14.2", + "error-chain", + "glob", + "pulldown-cmark 0.9.6", + "tempfile", + "walkdir", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "slotmap" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbff4acf519f630b3a3ddcfaea6c06b42174d9a44bc70c620e9ed1649d58b82a" +dependencies = [ + "serde", + "version_check", +] + +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +dependencies = [ + "serde", +] + +[[package]] +name = "smithay-client-toolkit" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "922fd3eeab3bd820d76537ce8f582b1cf951eceb5475c28500c7457d9d17f53a" +dependencies = [ + "bitflags 2.5.0", + "calloop", + "calloop-wayland-source", + "cursor-icon", + "libc", + "log", + "memmap2", + "rustix 0.38.34", + "thiserror", + "wayland-backend", + "wayland-client", + "wayland-csd-frame", + "wayland-cursor", + "wayland-protocols", + "wayland-protocols-wlr", + "wayland-scanner", + "xkeysym", +] + +[[package]] +name = "smithay-clipboard" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c091e7354ea8059d6ad99eace06dd13ddeedbb0ac72d40a9a6e7ff790525882d" +dependencies = [ + "libc", + "smithay-client-toolkit", + "wayland-backend", +] + +[[package]] +name = "smol_str" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd538fb6910ac1099850255cf94a94df6551fbdd602454387d0adb2d1ca6dead" +dependencies = [ + "serde", +] + +[[package]] +name = "snap" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" + +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "socket2" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socketpair" +version = "0.19.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "255e2c62749a9d8a59a7957add782c441e1a52a18d03d7bfde6211cf7bac94f6" +dependencies = [ + "io-extras", + "io-lifetimes 2.0.3", + "rustix 0.38.34", + "uuid", + "windows-sys 0.52.0", +] + +[[package]] +name = "speedy" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da1992073f0e55aab599f4483c460598219b4f9ff0affa124b33580ab511e25a" +dependencies = [ + "memoffset 0.9.1", + "speedy-derive", +] + +[[package]] +name = "speedy-derive" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658f2ca5276b92c3dfd65fa88316b4e032ace68f88d7570b43967784c0bac5ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spirv" +version = "0.3.0+sdk-1.3.268.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eda41003dc44290527a59b13432d4a0379379fa074b70174882adfbdfd917844" +dependencies = [ + "bitflags 2.5.0", +] + +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "std_prelude" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8207e78455ffdf55661170876f88daf85356e4edd54e0a3dbc79586ca1e50cbe" + +[[package]] +name = "stop-token" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af91f480ee899ab2d9f8435bfdfc14d08a5754bd9d3fef1f1a1c23336aad6c8b" +dependencies = [ + "async-channel 1.9.0", + "cfg-if 1.0.0", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +dependencies = [ + "strum_macros 0.25.3", +] + +[[package]] +name = "strum" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" + +[[package]] +name = "strum_macros" +version = "0.25.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.65", +] + +[[package]] +name = 
"strum_macros" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.65", +] + +[[package]] +name = "sublime_fuzzy" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7986063f7c0ab374407e586d7048a3d5aac94f103f751088bf398e07cd5400" + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2863d96a84c6439701d7a38f9de935ec562c8832cc55d1dde0f513b52fad106" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "syntect" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "874dcfa363995604333cf947ae9f751ca3af4522c60886774c4963943b4746b1" +dependencies = [ + "bincode", + "bitflags 1.3.2", + "flate2", + "fnv", + "once_cell", + "onig", + "plist", + "regex-syntax 0.8.3", + "serde", + "serde_derive", + "serde_json", + "thiserror", + "walkdir", + "yaml-rust", +] + +[[package]] +name = "sys-info" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b3a0d0aba8bf96a0e1ddfdc352fc53b3df7f39318c71854910c3c4b024ae52c" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "sysinfo" +version = "0.29.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd727fc423c2060f6c92d9534cef765c65a6ed3f428a03d7def74a8c4348e666" +dependencies = [ + "cfg-if 1.0.0", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "rayon", + "winapi 0.3.9", +] + +[[package]] +name = "sysinfo" +version = "0.30.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "732ffa00f53e6b2af46208fba5718d9662a421049204e156328b66791ffa15ae" +dependencies = [ + "cfg-if 1.0.0", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "rayon", + "windows 0.52.0", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "target-lexicon" +version = "0.12.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" + +[[package]] +name = "tempfile" +version = "3.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +dependencies = [ + "cfg-if 1.0.0", + "fastrand 2.1.0", + "rustix 0.38.34", + "windows-sys 0.52.0", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "terminal_size" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +dependencies = [ + "rustix 0.38.34", + "windows-sys 0.48.0", +] + +[[package]] +name = "textwrap" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" + +[[package]] +name = "thiserror" +version = "1.0.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if 1.0.0", + "once_cell", +] + +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "thrift" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09678c4cdbb4eed72e18b7c2af1329c69825ed16fcbac62d083fc3e2b0590ff0" +dependencies = [ + "byteorder", + "integer-encoding", + "log", + "ordered-float 1.1.1", + "threadpool", +] + +[[package]] +name = "thrift" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e54bc85fc7faa8bc175c4bab5b92ba8d9a3ce893d0e9f42cc455c8ab16a9e09" +dependencies = [ + "byteorder", + "integer-encoding", + "ordered-float 2.10.1", +] + +[[package]] +name = "tiff" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba1310fcea54c6a9a4fd1aad794ecc02c31682f6bfbecdf460bf19533eed1e3e" +dependencies = [ + "flate2", + "jpeg-decoder", + "weezl", +] + +[[package]] +name = "time" +version = "0.3.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +dependencies = [ + "deranged", + "itoa", + "js-sys", + "libc", + "num-conv", + "num_threads", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + +[[package]] +name = "time-macros" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinystl" +version = "0.0.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdbcdda2f86a57b89b5d9ac17cd4c9f3917ec8edcde403badf3d992d2947af2a" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tobj" +version = "4.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3bd4ba05f29e4c65b6c0c11a58b6465ffa820bac890d76ad407b4e81d8372e8" +dependencies = [ + "ahash", +] + +[[package]] +name = "tokio" +version = "1.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio 0.8.11", + "num_cpus", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2 0.5.7", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-macros" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.4", + "rustls-pki-types", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" +dependencies = [ + "futures-util", + "log", + "tokio", + "tungstenite 0.20.1", +] + +[[package]] +name = "tokio-util" +version = "0.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml_datetime" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" + +[[package]] +name = "toml_edit" +version = "0.19.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +dependencies = [ + "indexmap 2.2.6", + "toml_datetime", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +dependencies = [ + "indexmap 2.2.6", + "toml_datetime", + "winnow", +] + +[[package]] +name = "tonic" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.21.7", + "bytes", + "h2", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.28", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21ebb87a95ea13271332df069020513ab70bdb5637ca42d6e492dc3bbbad48de" +dependencies = [ + "once_cell", + "opentelemetry 0.18.0", + "tracing", + "tracing-core", + "tracing-log 0.1.4", + "tracing-subscriber", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "matchers", + "nu-ansi-term 0.46.0", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log 
0.2.0", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "ttf-parser" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c591d83f69777866b9126b24c6dd9a18351f177e49d625920d19f989fd31cf8" + +[[package]] +name = "tungstenite" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 0.2.12", + "httparse", + "log", + "rand", + "sha1", + "thiserror", + "url", + "utf-8", +] + +[[package]] +name = "tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.1.0", + "httparse", + "log", + "rand", + "sha1", + "thiserror", + "url", + "utf-8", +] + +[[package]] +name = "twox-hash" +version = "1.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" +dependencies = [ + "cfg-if 0.1.10", + "static_assertions", +] + +[[package]] +name = "type-map" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deb68604048ff8fa93347f02441e4487594adc20bb8a084f9e564d2b827a0a9f" +dependencies = [ + "rustc-hash", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + +[[package]] +name = "uds_windows" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89daebc3e6fd160ac4aa9fc8b3bf71e1f74fbf92367ae71fb83a037e8bf164b9" +dependencies = [ + "memoffset 0.9.1", + "tempfile", + "winapi 0.3.9", +] + +[[package]] +name = "uhlc" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d291a7454d390b753ef68df8145da18367e32883ec2fa863959f0aefb915cdb" +dependencies = [ + "hex", + "humantime", + "lazy_static", + "log", + "serde", + "spin 0.9.8", + "uuid", +] + +[[package]] +name = "unicase" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-normalization" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = 
"1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" + +[[package]] +name = "unicode-width" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6" + +[[package]] +name = "unicode-xid" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" + +[[package]] +name = "unindent" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce" + +[[package]] +name = "uninit" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e130f2ed46ca5d8ec13c7ff95836827f92f5f5f37fd2b2bf16f33c408d98bb6" +dependencies = [ + "extension-traits", +] + +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "unwind_safe" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0976c77def3f1f75c4ef892a292c31c0bbe9e3d0702c63044d7c76db298171a3" + +[[package]] +name = "unzip-n" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2e7e85a0596447f0f2ac090e16bc4c516c6fe91771fb0c0ccf7fa3dae896b9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ureq" +version = "2.9.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d11a831e3c0b56e438a28308e7c810799e3c118417f342d30ecec080105395cd" +dependencies = [ + "base64 0.22.1", + "flate2", + "log", + "once_cell", + "rustls 0.22.4", + "rustls-pki-types", + "rustls-webpki", + "url", + "webpki-roots 0.26.1", +] + +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + +[[package]] +name = "uuid" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +dependencies = [ + "atomic", + "getrandom", + "rand", + 
"serde", + "uuid-macro-internal", + "wasm-bindgen", +] + +[[package]] +name = "uuid-macro-internal" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9881bea7cbe687e36c9ab3b778c36cd0487402e270304e8b1296d5085303c1a2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "validated_struct" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feef04c049b4beae3037a2a31b8da40d8cebec0b97456f24c7de0ede4ed9efed" +dependencies = [ + "json5", + "serde", + "serde_json", + "validated_struct_macros", +] + +[[package]] +name = "validated_struct_macros" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d4444a980afa9ef0d29c2a3f4d952ec0495a7a996a9c78b52698b71bc21edb4" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "unzip-n", +] + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "value-bag" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "vec1" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb60dcfffc189bfd4e2a81333c268619fee9db53da71bce2bcbd8e129c56936" + +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "waker-fn" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +dependencies = [ + "cfg-if 1.0.0", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + 
"quote", + "syn 2.0.65", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +dependencies = [ + "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" + +[[package]] +name = "wasm-streams" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "wayland-backend" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d50fa61ce90d76474c87f5fc002828d81b32677340112b4ef08079a9d459a40" +dependencies = [ + "cc", + "downcast-rs", + "rustix 0.38.34", + "scoped-tls", + "smallvec", + "wayland-sys", +] + +[[package]] +name = "wayland-client" +version = "0.31.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82fb96ee935c2cea6668ccb470fb7771f6215d1691746c2d896b447a00ad3f1f" +dependencies = [ + "bitflags 2.5.0", + "rustix 0.38.34", + "wayland-backend", + "wayland-scanner", +] + +[[package]] +name = "wayland-csd-frame" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "625c5029dbd43d25e6aa9615e88b829a5cad13b2819c4ae129fdbb7c31ab4c7e" +dependencies = [ + "bitflags 2.5.0", + "cursor-icon", + "wayland-backend", +] + +[[package]] +name = "wayland-cursor" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71ce5fa868dd13d11a0d04c5e2e65726d0897be8de247c0c5a65886e283231ba" +dependencies = [ + "rustix 0.38.34", + "wayland-client", + "xcursor", +] + +[[package]] +name = "wayland-protocols" +version = "0.31.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f81f365b8b4a97f422ac0e8737c438024b5951734506b0e1d775c73030561f4" +dependencies = [ + "bitflags 2.5.0", + "wayland-backend", + "wayland-client", + "wayland-scanner", +] + +[[package]] +name = "wayland-protocols-plasma" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23803551115ff9ea9bce586860c5c5a971e360825a0309264102a9495a5ff479" +dependencies = [ + "bitflags 2.5.0", + "wayland-backend", + "wayland-client", + "wayland-protocols", + "wayland-scanner", +] + +[[package]] +name = "wayland-protocols-wlr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad1f61b76b6c2d8742e10f9ba5c3737f6530b4c243132c2a2ccc8aa96fe25cd6" +dependencies = [ + "bitflags 
2.5.0", + "wayland-backend", + "wayland-client", + "wayland-protocols", + "wayland-scanner", +] + +[[package]] +name = "wayland-scanner" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63b3a62929287001986fb58c789dce9b67604a397c15c611ad9f747300b6c283" +dependencies = [ + "proc-macro2", + "quick-xml", + "quote", +] + +[[package]] +name = "wayland-sys" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15a0c8eaff5216d07f226cb7a549159267f3467b289d9a2e52fd3ef5aae2b7af" +dependencies = [ + "dlib", + "log", + "once_cell", + "pkg-config", +] + +[[package]] +name = "web-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa30049b1c872b72c89866d458eae9f20380ab280ffd1b1e18df2d3e2d98cfe0" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webbrowser" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db67ae75a9405634f5882791678772c94ff5f16a66535aae186e26aa0841fc8b" +dependencies = [ + "core-foundation", + "home", + "jni", + "log", + "ndk-context", + "objc", + "raw-window-handle 0.5.2", + "url", + "web-sys", +] + +[[package]] +name = "webpki" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" +dependencies = [ + "ring 0.17.8", + "untrusted 0.9.0", +] + +[[package]] +name = "webpki-roots" +version = "0.22.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" +dependencies = [ + "webpki", +] + +[[package]] +name = "webpki-roots" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3de34ae270483955a94f4b21bdaaeb83d508bb84a01435f393818edb0012009" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "weezl" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53a85b86a771b1c87058196170769dd264f66c0782acf1ae6cc51bfd64b39082" + +[[package]] +name = "wgpu" +version = "0.19.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbd7311dbd2abcfebaabf1841a2824ed7c8be443a0f29166e5d3c6a53a762c01" +dependencies = [ + "arrayvec", + "cfg-if 1.0.0", + "cfg_aliases 0.1.1", + "js-sys", + "log", + "naga", + "parking_lot", + "profiling", + "raw-window-handle 0.6.2", + "smallvec", + "static_assertions", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "wgpu-core", + "wgpu-hal", + "wgpu-types", +] + +[[package]] +name = "wgpu-core" +version = "0.19.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28b94525fc99ba9e5c9a9e24764f2bc29bad0911a7446c12f446a8277369bf3a" +dependencies = [ + "arrayvec", + "bit-vec", + "bitflags 2.5.0", + "cfg_aliases 0.1.1", + "codespan-reporting", + "indexmap 2.2.6", + "log", + "naga", + "once_cell", + "parking_lot", + "profiling", + "raw-window-handle 0.6.2", + "rustc-hash", + "smallvec", + "thiserror", + "web-sys", + "wgpu-hal", + "wgpu-types", +] + +[[package]] +name = "wgpu-hal" +version = "0.19.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1a4924366df7ab41a5d8546d6534f1f33231aa5b3f72b9930e300f254e39c3" +dependencies = [ + "android_system_properties", + "arrayvec", + "ash", + "bitflags 2.5.0", + "block", + "cfg_aliases 0.1.1", + "core-graphics-types", + "glow", + "glutin_wgl_sys", + "gpu-alloc", + "gpu-allocator", + "gpu-descriptor", + "hassle-rs", + "js-sys", + "khronos-egl", + "libc", + "libloading 0.8.3", + "log", + "metal", + "naga", + "ndk-sys", + "objc", + "once_cell", + "parking_lot", + "profiling", + "raw-window-handle 0.6.2", + "renderdoc-sys", + "rustc-hash", + "smallvec", + "thiserror", + "wasm-bindgen", + "web-sys", + "wgpu-types", + "winapi 0.3.9", +] + +[[package]] +name = "wgpu-types" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b671ff9fb03f78b46ff176494ee1ebe7d603393f42664be55b64dc8d53969805" +dependencies = [ + "bitflags 2.5.0", + "js-sys", + "web-sys", +] + +[[package]] +name = "which" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bf3ea8596f3a0dd5980b46430f2058dfe2c36a27ccfbb1845d6fbfcd9ba6e14" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.34", + "windows-sys 0.48.0", +] + +[[package]] +name = "widestring" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" + +[[package]] +name = "wild" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3131afc8c575281e1e80f36ed6a092aa502c08b18ed7524e86fbbb12bb410e1" +dependencies = [ + "glob", +] + +[[package]] +name = "win-sys" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b7b128a98c1cfa201b09eb49ba285887deb3cbe7466a98850eb1adabb452be5" +dependencies = [ + "windows 0.34.0", +] + +[[package]] +name = "winapi" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-build" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" +dependencies = [ + "windows_aarch64_msvc 0.34.0", + 
"windows_i686_gnu 0.34.0", + "windows_i686_msvc 0.34.0", + "windows_x86_64_gnu 0.34.0", + "windows_x86_64_msvc 0.34.0", +] + +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +dependencies = [ + "windows-core", + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-implement" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e2ee588991b9e7e6c8338edf3333fbe4da35dc72092643958ebb43f0ab2c49c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "windows-interface" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6fb8df20c9bcaa8ad6ab513f7b40104840c8867d5751126e4df3b08388d0cc7" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", 
+ "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +dependencies = [ + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" + +[[package]] +name = "windows_i686_gnu" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" + +[[package]] +name = "windows_i686_msvc" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" + +[[package]] +name = "winit" +version = "0.29.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d59ad965a635657faf09c8f062badd885748428933dad8e8bdd64064d92e5ca" +dependencies = [ + "ahash", + "android-activity", + "atomic-waker", + "bitflags 2.5.0", + "bytemuck", + "calloop", + "cfg_aliases 0.1.1", + "core-foundation", + "core-graphics", + "cursor-icon", + "icrate", + "js-sys", + "libc", + "log", + "memmap2", + "ndk", + "ndk-sys", + "objc2 0.4.1", + "once_cell", + "orbclient", + "percent-encoding", + "raw-window-handle 0.6.2", + "redox_syscall 0.3.5", + "rustix 0.38.34", + "smithay-client-toolkit", + "smol_str", + "unicode-segmentation", + "wasm-bindgen", + "wasm-bindgen-futures", + "wayland-backend", + "wayland-client", + "wayland-protocols", + "wayland-protocols-plasma", + "web-sys", + "web-time", + "windows-sys 0.48.0", + "x11-dl", + "x11rb", + 
"xkbcommon-dl", +] + +[[package]] +name = "winnow" +version = "0.5.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" +dependencies = [ + "cfg-if 1.0.0", + "windows-sys 0.48.0", +] + +[[package]] +name = "with_builtin_macros" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a59d55032495429b87f9d69954c6c8602e4d3f3e0a747a12dea6b0b23de685da" +dependencies = [ + "with_builtin_macros-proc_macros", +] + +[[package]] +name = "with_builtin_macros-proc_macros" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bd7679c15e22924f53aee34d4e448c45b674feb6129689af88593e129f8f42" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "wrapcenum-derive" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76ff259533532054cfbaefb115c613203c73707017459206380f03b3b3f266e" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "ws2_32-sys" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "x11-dl" +version = "2.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38735924fedd5314a6e548792904ed8c6de6636285cb9fec04d5b1db85c1516f" +dependencies = [ + "libc", + "once_cell", + "pkg-config", +] + +[[package]] +name = "x11rb" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d91ffca73ee7f68ce055750bf9f6eca0780b8c85eff9bc046a3b0da41755e12" +dependencies = [ + "as-raw-xcb-connection", + "gethostname", + "libc", + "libloading 0.8.3", + "once_cell", + "rustix 0.38.34", + "x11rb-protocol", +] + +[[package]] +name = "x11rb-protocol" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec107c4503ea0b4a98ef47356329af139c0a4f7750e621cf2973cd3385ebcb3d" + +[[package]] +name = "xcursor" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a0ccd7b4a5345edfcd0c3535718a4e9ff7798ffc536bb5b5a0e26ff84732911" + +[[package]] +name = "xdg-home" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21e5a325c3cb8398ad6cf859c1135b25dd29e186679cf2da7581d9679f63b38e" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "xkbcommon-dl" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039de8032a9a8856a6be89cea3e5d12fdd82306ab7c94d74e6deab2460651c5" +dependencies = [ + "bitflags 2.5.0", + "dlib", + "log", + "once_cell", + "xkeysym", +] + +[[package]] +name = "xkeysym" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"054a8e68b76250b253f671d1268cb7f1ae089ec35e195b2efb2a4e9a836d0621" + +[[package]] +name = "xml-rs" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" + +[[package]] +name = "xshell" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db0ab86eae739efd1b054a8d3d16041914030ac4e01cd1dca0cf252fd8b6437" +dependencies = [ + "xshell-macros", +] + +[[package]] +name = "xshell-macros" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d422e8e38ec76e2f06ee439ccc765e9c6a9638b9e7c9f2e8255e4d41e8bd852" + +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "zbus" +version = "3.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "675d170b632a6ad49804c8cf2105d7c31eddd3312555cffd4b740e08e97c25e6" +dependencies = [ + "async-broadcast", + "async-executor", + "async-fs 1.6.0", + "async-io 1.13.0", + "async-lock 2.8.0", + "async-process", + "async-recursion", + "async-task", + "async-trait", + "blocking", + "byteorder", + "derivative", + "enumflags2", + "event-listener 2.5.3", + "futures-core", + "futures-sink", + "futures-util", + "hex", + "nix 0.26.4", + "once_cell", + "ordered-stream", + "rand", + "serde", + "serde_repr", + "sha1", + "static_assertions", + "tracing", + "uds_windows", + "winapi 0.3.9", + "xdg-home", + "zbus_macros", + "zbus_names", + "zvariant", +] + +[[package]] +name = "zbus_macros" +version = "3.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7131497b0f887e8061b430c530240063d33bf9455fa34438f388a245da69e0a5" +dependencies = [ + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "regex", + "syn 1.0.109", + "zvariant_utils", +] + +[[package]] +name = "zbus_names" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "437d738d3750bed6ca9b8d423ccc7a8eb284f6b1d6d4e225a0e4e6258d864c8d" +dependencies = [ + "serde", + "static_assertions", + "zvariant", +] + +[[package]] +name = "zenoh" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44140d6ebcf2e52ee48acad0e9d960c2b1e868eec021da2538e58373d615fc18" +dependencies = [ + "async-global-executor", + "async-std", + "async-trait", + "base64 0.13.1", + "env_logger", + "event-listener 2.5.3", + "flume 0.10.14", + "form_urlencoded", + "futures", + "git-version", + "hex", + "lazy_static", + "log", + "ordered-float 3.9.2", + "petgraph", + "rand", + "regex", + "rustc_version", + "serde", + "serde_json", + "socket2 0.4.10", + "stop-token", + "uhlc", + "uuid", + "vec_map", + "zenoh-buffers", + "zenoh-cfg-properties", + "zenoh-collections", + "zenoh-config", + "zenoh-core", + "zenoh-crypto", + "zenoh-link", + "zenoh-plugin-trait", + "zenoh-protocol", + "zenoh-protocol-core", + "zenoh-sync", + "zenoh-transport", + "zenoh-util", +] + +[[package]] +name = "zenoh-buffers" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "244d54f1228d3c53fc69483faafcfcc1b4d670b60cffce17696fc49fbc7a6608" +dependencies = [ + "async-std", + "hex", + "zenoh-collections", + "zenoh-core", +] + +[[package]] +name = "zenoh-cfg-properties" +version = "0.7.0-rc" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a963395194bf1b64f67d89333e8089f01568ec7ac28c305847f505452a98006e" +dependencies = [ + "zenoh-core", + "zenoh-macros", +] + +[[package]] +name = "zenoh-collections" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e256d7aff2c9af765d77efbfae7fcb708d2d7f4e179aa201bff2f81ad7a3845" +dependencies = [ + "async-std", + "async-trait", + "flume 0.10.14", + "log", + "zenoh-core", + "zenoh-sync", +] + +[[package]] +name = "zenoh-config" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bad1ff61abf28c57e8879ec4286fa29becf7e9bf12555df9a7faddff3bc9ea1b" +dependencies = [ + "flume 0.10.14", + "json5", + "num_cpus", + "serde", + "serde_json", + "serde_yaml 0.9.34+deprecated", + "validated_struct", + "zenoh-cfg-properties", + "zenoh-core", + "zenoh-protocol-core", + "zenoh-util", +] + +[[package]] +name = "zenoh-core" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b0f55158f3f83555db74d4cf5ebc34f90df5d2992cc0de67eba69b99628605e" +dependencies = [ + "anyhow", + "async-std", + "lazy_static", + "zenoh-macros", +] + +[[package]] +name = "zenoh-crypto" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "653ba15479a0e3f1a94d7f079babc52f742f3a2bd995c59bc250cfc9a789dbbc" +dependencies = [ + "aes", + "hmac", + "rand", + "rand_chacha", + "sha3", + "zenoh-core", +] + +[[package]] +name = "zenoh-link" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e58770c73cf0b5ec8fbe104d609eec83f9bc3463ea23a583c8b465de77f7d27" +dependencies = [ + "async-std", + "async-trait", + "zenoh-cfg-properties", + "zenoh-config", + "zenoh-core", + "zenoh-link-commons", + "zenoh-link-quic", + "zenoh-link-tcp", + "zenoh-link-tls", + "zenoh-link-udp", + "zenoh-link-unixsock_stream", + "zenoh-protocol-core", +] + +[[package]] +name = "zenoh-link-commons" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21aab9eeb2aba53e37aae57467ffca1268d209811c5e2f39761aab4c1343bce3" +dependencies = [ + "async-std", + "async-trait", + "flume 0.10.14", + "serde", + "zenoh-buffers", + "zenoh-cfg-properties", + "zenoh-core", + "zenoh-protocol", + "zenoh-protocol-core", +] + +[[package]] +name = "zenoh-link-quic" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9f1354094eb4d5e4b864b5aa385efce46f94a43f6ba57dd9ea9a017e6e74176" +dependencies = [ + "async-std", + "async-trait", + "futures", + "log", + "quinn", + "rustls 0.20.9", + "rustls-native-certs", + "rustls-pemfile 1.0.4", + "webpki", + "zenoh-cfg-properties", + "zenoh-config", + "zenoh-core", + "zenoh-link-commons", + "zenoh-protocol-core", + "zenoh-sync", + "zenoh-util", +] + +[[package]] +name = "zenoh-link-tcp" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ffc29707a50680dba124dd4d8bc3bc19feb158db8312433bfc3078f7b8f1ef" +dependencies = [ + "async-std", + "async-trait", + "log", + "zenoh-core", + "zenoh-link-commons", + "zenoh-protocol-core", + "zenoh-sync", + "zenoh-util", +] + +[[package]] +name = "zenoh-link-tls" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a5630b3a218c7179191dab78ebc45da1837793951bddb8fda4f5900b47da552" +dependencies = [ + "async-rustls", + 
"async-std", + "async-trait", + "futures", + "log", + "rustls-pemfile 1.0.4", + "webpki", + "webpki-roots 0.22.6", + "zenoh-cfg-properties", + "zenoh-config", + "zenoh-core", + "zenoh-link-commons", + "zenoh-protocol-core", + "zenoh-sync", + "zenoh-util", +] + +[[package]] +name = "zenoh-link-udp" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "176494947bd3a6aa10baa469afa4572635822683830808cd71d5554ce15dfebb" +dependencies = [ + "async-std", + "async-trait", + "log", + "socket2 0.4.10", + "zenoh-collections", + "zenoh-core", + "zenoh-link-commons", + "zenoh-protocol-core", + "zenoh-sync", + "zenoh-util", +] + +[[package]] +name = "zenoh-link-unixsock_stream" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d9974305820f92478490ba8b8f119eb5b7d7b4998a7125d1510f6e69f3f81d1" +dependencies = [ + "async-std", + "async-trait", + "futures", + "log", + "nix 0.26.4", + "uuid", + "zenoh-core", + "zenoh-link-commons", + "zenoh-protocol-core", + "zenoh-sync", +] + +[[package]] +name = "zenoh-macros" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a9ac20b120990778cca204ee46c43a37ed4ffbc331e95702615490f9c169de8" +dependencies = [ + "proc-macro2", + "quote", + "rustc_version", + "syn 1.0.109", + "unzip-n", +] + +[[package]] +name = "zenoh-plugin-trait" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3b8bfb8e2625e1150dab46b7a4433f866aa06af763237d564b1aa8f6aaf0b29" +dependencies = [ + "libloading 0.7.4", + "log", + "serde_json", + "zenoh-core", + "zenoh-macros", + "zenoh-util", +] + +[[package]] +name = "zenoh-protocol" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "174a00456e29d941a4230148fd184953e95883bde47a4cfc1a508e0aaec89a89" +dependencies = [ + "log", + "uhlc", + "zenoh-buffers", + "zenoh-core", + "zenoh-protocol-core", +] + +[[package]] +name = "zenoh-protocol-core" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdf3eaea2095d2c13fefdae25aca813b3644fc15e1441e16a4398b5113033753" +dependencies = [ + "hex", + "itertools 0.10.5", + "lazy_static", + "rand", + "serde", + "uhlc", + "uuid", + "zenoh-core", +] + +[[package]] +name = "zenoh-sync" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "821070b62a55d4c8a22e1e06c939c1f2d94767e660df9fcbea377781f72f59bf" +dependencies = [ + "async-std", + "event-listener 2.5.3", + "flume 0.10.14", + "futures", + "tokio", + "zenoh-core", +] + +[[package]] +name = "zenoh-transport" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce4387cfc02cb86383de8e65ab1eb204e3908c5f1db9e6b4defd8ad530c9ddea" +dependencies = [ + "async-executor", + "async-global-executor", + "async-std", + "async-trait", + "flume 0.10.14", + "log", + "paste", + "rand", + "ringbuffer-spsc", + "rsa", + "serde", + "zenoh-buffers", + "zenoh-cfg-properties", + "zenoh-collections", + "zenoh-config", + "zenoh-core", + "zenoh-crypto", + "zenoh-link", + "zenoh-protocol", + "zenoh-protocol-core", + "zenoh-sync", +] + +[[package]] +name = "zenoh-util" +version = "0.7.0-rc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54646455dad3940535e97cce03f1b604265177349133903d989bc72e00011404" +dependencies = [ + "async-std", + "clap 3.2.25", + "futures", + "hex", + "home", + 
"humantime", + "lazy_static", + "libc", + "libloading 0.7.4", + "log", + "pnet", + "pnet_datalink", + "shellexpand 3.1.0", + "winapi 0.3.9", + "zenoh-cfg-properties", + "zenoh-collections", + "zenoh-core", + "zenoh-crypto", + "zenoh-sync", +] + +[[package]] +name = "zerocopy" +version = "0.7.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" + +[[package]] +name = "zstd" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.9+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +dependencies = [ + "cc", + "pkg-config", +] + +[[package]] +name = "zune-core" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" + +[[package]] +name = "zune-jpeg" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec866b44a2a1fd6133d363f073ca1b179f438f99e7e5bfb1e33f7181facfe448" +dependencies = [ + "zune-core", +] + +[[package]] +name = "zvariant" +version = "3.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4eef2be88ba09b358d3b58aca6e41cd853631d44787f319a1383ca83424fb2db" +dependencies = [ + "byteorder", + "enumflags2", + "libc", + "serde", + "static_assertions", + "url", + "zvariant_derive", +] + +[[package]] +name = "zvariant_derive" +version = "3.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37c24dc0bed72f5f90d1f8bb5b07228cbf63b3c6e9f82d82559d4bae666e7ed9" +dependencies = [ + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "syn 1.0.109", + "zvariant_utils", +] + +[[package]] +name = "zvariant_utils" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7234f0d811589db492d16893e3f21e8e2fd282e6d01b0cddee310322062cc200" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..de18999664803ed250cfca2aa1f0357f34aada45 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,151 @@ +[workspace] +members = [ + "apis/c/node", + "apis/c/operator", + "apis/c++/node", + "apis/c++/operator", + "apis/python/node", + "apis/python/operator", + "apis/rust/*", + "apis/rust/operator/macros", + "apis/rust/operator/types", + "binaries/cli", + "binaries/coordinator", + "binaries/daemon", + "binaries/runtime", + 
"examples/rust-dataflow/node", + "examples/rust-dataflow/status-node", + "examples/rust-dataflow/sink", + "examples/rust-ros2-dataflow/node", + "examples/benchmark/node", + "examples/benchmark/sink", + "examples/multiple-daemons/node", + "examples/multiple-daemons/operator", + "examples/multiple-daemons/sink", + "libraries/arrow-convert", + "libraries/communication-layer/*", + "libraries/core", + "libraries/message", + "libraries/shared-memory-server", + "libraries/extensions/download", + "libraries/extensions/telemetry/*", + "tool_nodes/dora-record", + "tool_nodes/dora-rerun", + "libraries/extensions/ros2-bridge", + "libraries/extensions/ros2-bridge/msg-gen", + "libraries/extensions/ros2-bridge/python", +] + +[workspace.package] +# Make sure to also bump `apis/node/python/__init__.py` version. +version = "0.3.4" +description = "`dora` goal is to be a low latency, composable, and distributed data flow." +documentation = "https://dora.carsmos.ai" +license = "Apache-2.0" + +[workspace.dependencies] +dora-node-api = { version = "0.3.4", path = "apis/rust/node", default-features = false } +dora-node-api-python = { version = "0.3.4", path = "apis/python/node", default-features = false } +dora-operator-api = { version = "0.3.4", path = "apis/rust/operator", default-features = false } +dora-operator-api-macros = { version = "0.3.4", path = "apis/rust/operator/macros" } +dora-operator-api-types = { version = "0.3.4", path = "apis/rust/operator/types" } +dora-operator-api-python = { version = "0.3.4", path = "apis/python/operator" } +dora-operator-api-c = { version = "0.3.4", path = "apis/c/operator" } +dora-node-api-c = { version = "0.3.4", path = "apis/c/node" } +dora-core = { version = "0.3.4", path = "libraries/core" } +dora-arrow-convert = { version = "0.3.4", path = "libraries/arrow-convert" } +dora-tracing = { version = "0.3.4", path = "libraries/extensions/telemetry/tracing" } +dora-metrics = { version = "0.3.4", path = "libraries/extensions/telemetry/metrics" } +dora-download = { version = "0.3.4", path = "libraries/extensions/download" } +shared-memory-server = { version = "0.3.4", path = "libraries/shared-memory-server" } +communication-layer-request-reply = { version = "0.3.4", path = "libraries/communication-layer/request-reply" } +dora-message = { version = "0.3.4", path = "libraries/message" } +dora-runtime = { version = "0.3.4", path = "binaries/runtime" } +dora-daemon = { version = "0.3.4", path = "binaries/daemon" } +dora-coordinator = { version = "0.3.4", path = "binaries/coordinator" } +dora-ros2-bridge = { path = "libraries/extensions/ros2-bridge" } +dora-ros2-bridge-msg-gen = { path = "libraries/extensions/ros2-bridge/msg-gen" } +dora-ros2-bridge-python = { path = "libraries/extensions/ros2-bridge/python" } +arrow = { version = "52" } +arrow-schema = { version = "52" } +arrow-data = { version = "52" } +arrow-array = { version = "52" } +pyo3 = "0.21" +pythonize = "0.21" + +[package] +name = "dora-examples" +version = "0.0.0" +edition = "2021" +license = "Apache-2.0" + + +[features] +# enables examples that depend on a sourced ROS2 installation +ros2-examples = [] + +[dev-dependencies] +eyre = "0.6.8" +tokio = "1.24.2" +dora-coordinator = { workspace = true } +dora-core = { workspace = true } +dora-tracing = { workspace = true } +dora-download = { workspace = true } +dunce = "1.0.2" +serde_yaml = "0.8.23" +uuid = { version = "1.7", features = ["v7", "serde"] } +tracing = "0.1.36" +futures = "0.3.25" +tokio-stream = "0.1.11" + +[[example]] +name = "c-dataflow" +path = 
"examples/c-dataflow/run.rs" + +[[example]] +name = "rust-dataflow" +path = "examples/rust-dataflow/run.rs" + +[[example]] +name = "rust-ros2-dataflow" +path = "examples/rust-ros2-dataflow/run.rs" +required-features = ["ros2-examples"] + +# TODO: Fix example #192 +[[example]] +name = "rust-dataflow-url" +path = "examples/rust-dataflow-url/run.rs" + +[[example]] +name = "cxx-dataflow" +path = "examples/c++-dataflow/run.rs" + +[[example]] +name = "python-dataflow" +path = "examples/python-dataflow/run.rs" + +[[example]] +name = "python-ros2-dataflow" +path = "examples/python-ros2-dataflow/run.rs" +required-features = ["ros2-examples"] + +[[example]] +name = "python-operator-dataflow" +path = "examples/python-operator-dataflow/run.rs" + +[[example]] +name = "benchmark" +path = "examples/benchmark/run.rs" + +[[example]] +name = "multiple-daemons" +path = "examples/multiple-daemons/run.rs" + +[[example]] +name = "cmake-dataflow" +path = "examples/cmake-dataflow/run.rs" + +[[example]] +name = "cxx-ros2-dataflow" +path = "examples/c++-ros2-dataflow/run.rs" +required-features = ["ros2-examples"] diff --git a/Changelog.md b/Changelog.md new file mode 100644 index 0000000000000000000000000000000000000000..8f0ca9cd0cab1f7a81e44e0ccd76f6be87ba8e51 --- /dev/null +++ b/Changelog.md @@ -0,0 +1,334 @@ +# Changelog + +## v0.3.4 (2024-05-17) + +## What's Changed + +- Remove `cxx_build` call, which is no longer used by @phil-opp in https://github.com/dora-rs/dora/pull/470 +- Update `ros2-client` to latest version by @phil-opp in https://github.com/dora-rs/dora/pull/457 +- Configurable bind addrs by @Michael-J-Ward in https://github.com/dora-rs/dora/pull/471 +- Simple warning fixes by @Michael-J-Ward in https://github.com/dora-rs/dora/pull/477 +- Adding `dora-rerun` as a visualization tool by @haixuanTao in https://github.com/dora-rs/dora/pull/479 +- Fix Clippy and RERUN_MEMORY_LIMIT env variable default by @haixuanTao in https://github.com/dora-rs/dora/pull/490 +- Fix CI build errors by @phil-opp in https://github.com/dora-rs/dora/pull/491 +- Use `resolver = 2` for in workspace in Rust template by @phil-opp in https://github.com/dora-rs/dora/pull/492 +- Add grace duration and kill process by @haixuanTao in https://github.com/dora-rs/dora/pull/487 +- Simplify parsing of `AMENT_PREFIX_PATH` by @haixuanTao in https://github.com/dora-rs/dora/pull/489 +- Convert rust example to node by @Michael-J-Ward in https://github.com/dora-rs/dora/pull/494 +- Adding python IDE typing by @haixuanTao in https://github.com/dora-rs/dora/pull/493 +- Fix: Wait until dora daemon is connected to coordinator on `dora up` by @phil-opp in https://github.com/dora-rs/dora/pull/496 + +## New Contributors + +- @Michael-J-Ward made their first contribution in https://github.com/dora-rs/dora/pull/471 + +**Full Changelog**: https://github.com/dora-rs/dora/compare/v0.3.3...v0.3.4 + +## v0.3.3 (2024-04-08) + +## What's Changed + +- Metrics refactoring by @haixuanTao in https://github.com/dora-rs/dora/pull/423 +- Add ROS2 bridge support for C++ nodes by @phil-opp in https://github.com/dora-rs/dora/pull/425 +- Provide function to create empty `CombinedEvents` stream by @phil-opp in https://github.com/dora-rs/dora/pull/432 +- Expose ROS2 constants in generated bindings (Rust and C++) by @phil-opp in https://github.com/dora-rs/dora/pull/428 +- Add option to send `stdout` as node/operator output by @haixuanTao in https://github.com/dora-rs/dora/pull/388 +- Fix warning about `#pragma once` in main file by @phil-opp in 
https://github.com/dora-rs/dora/pull/433 +- Send runs artefacts into a dedicated `out` folder by @haixuanTao in https://github.com/dora-rs/dora/pull/429 +- Create README.md for cxx-ros2-example by @bobd988 in https://github.com/dora-rs/dora/pull/431 +- Use Async Parquet Writer for `dora-record` by @haixuanTao in https://github.com/dora-rs/dora/pull/434 +- Update mio to fix security vulnerability by @phil-opp in https://github.com/dora-rs/dora/pull/440 +- Add initial support for calling ROS2 services from Rust nodes by @phil-opp in https://github.com/dora-rs/dora/pull/439 +- Enable ROS2 service calls from C++ nodes by @phil-opp in https://github.com/dora-rs/dora/pull/441 +- Use `Debug` formatting for eyre errors when returning to C++ by @phil-opp in https://github.com/dora-rs/dora/pull/450 +- Fix out-of-tree builds in cmake example by @phil-opp in https://github.com/dora-rs/dora/pull/453 +- Fix broken link in README by @mshr-h in https://github.com/dora-rs/dora/pull/462 +- fix cargo run --example cmake-dataflow compile bugs by @XxChang in https://github.com/dora-rs/dora/pull/460 +- Llm example by @haixuanTao in https://github.com/dora-rs/dora/pull/451 +- Fix meter conflict by @haixuanTao in https://github.com/dora-rs/dora/pull/461 +- Update README.md by @bobd988 in https://github.com/dora-rs/dora/pull/458 +- Refactor `README` by @haixuanTao in https://github.com/dora-rs/dora/pull/463 +- Specify conda env for Python Operators by @haixuanTao in https://github.com/dora-rs/dora/pull/468 + +## Minor + +- Bump h2 from 0.3.24 to 0.3.26 by @dependabot in https://github.com/dora-rs/dora/pull/456 +- Update `bat` dependency to v0.24 by @phil-opp in https://github.com/dora-rs/dora/pull/424 + +## New Contributors + +- @bobd988 made their first contribution in https://github.com/dora-rs/dora/pull/431 +- @mshr-h made their first contribution in https://github.com/dora-rs/dora/pull/462 + +**Full Changelog**: https://github.com/dora-rs/dora/compare/v0.3.2...v0.3.3 + +## v0.3.2 (2024-01-26) + +## Features + +- Wait until `DestroyResult` is sent before exiting dora-daemon by @phil-opp in https://github.com/dora-rs/dora/pull/413 +- Reduce dora-rs to a single binary by @haixuanTao in https://github.com/dora-rs/dora/pull/410 +- Rework python ROS2 (de)serialization using parsed ROS2 messages directly by @phil-opp in https://github.com/dora-rs/dora/pull/415 +- Fix ros2 array bug by @haixuanTao in https://github.com/dora-rs/dora/pull/412 +- Test ros2 type info by @haixuanTao in https://github.com/dora-rs/dora/pull/418 +- Use forward slash as it is default way of defining ros2 topic by @haixuanTao in https://github.com/dora-rs/dora/pull/419 + +## Minor + +- Bump h2 from 0.3.21 to 0.3.24 by @dependabot in https://github.com/dora-rs/dora/pull/414 + +## v0.3.1 (2024-01-09) + +## Features + +- Support legacy python by @haixuanTao in https://github.com/dora-rs/dora/pull/382 +- Add an error catch in python `on_event` when using hot-reloading by @haixuanTao in https://github.com/dora-rs/dora/pull/372 +- add cmake example by @XxChang in https://github.com/dora-rs/dora/pull/381 +- Bump opentelemetry metrics to 0.21 by @haixuanTao in https://github.com/dora-rs/dora/pull/383 +- Trace send_output as it can be a big source of overhead for large messages by @haixuanTao in https://github.com/dora-rs/dora/pull/384 +- Adding a timeout method to not block indefinitely next event by @haixuanTao in https://github.com/dora-rs/dora/pull/386 +- Adding `Vec` conversion by @haixuanTao in https://github.com/dora-rs/dora/pull/387 +- Dora 
cli renaming by @haixuanTao in https://github.com/dora-rs/dora/pull/399 +- Update `ros2-client` and `rustdds` dependencies to latest fork version by @phil-opp in https://github.com/dora-rs/dora/pull/397 + +## Fix + +- Fix window path error by @haixuanTao in https://github.com/dora-rs/dora/pull/398 +- Fix read error in C++ node input by @haixuanTao in https://github.com/dora-rs/dora/pull/406 +- Bump unsafe-libyaml from 0.2.9 to 0.2.10 by @dependabot in https://github.com/dora-rs/dora/pull/400 + +## New Contributors + +- @XxChang made their first contribution in https://github.com/dora-rs/dora/pull/381 + +**Full Changelog**: https://github.com/dora-rs/dora/compare/v0.3.0...v0.3.1 + +## v0.3.0 (2023-11-01) + +## Features + +- Rust node API typed using arrow by @phil-opp in https://github.com/dora-rs/dora/pull/353 +- Dora record by @haixuanTao in https://github.com/dora-rs/dora/pull/365 +- beautify graph visualisation by @haixuanTao in https://github.com/dora-rs/dora/pull/370 +- Remove `Ros2Value` encapsulation of `ArrayData` by @haixuanTao in https://github.com/dora-rs/dora/pull/359 +- Refactor python typing by @haixuanTao in https://github.com/dora-rs/dora/pull/369 +- Update README discord link by @Felixhuangsiling in https://github.com/dora-rs/dora/pull/361 + +### Other + +- Update `rustix` v0.38 dependency by @phil-opp in https://github.com/dora-rs/dora/pull/366 +- Bump rustix from 0.37.24 to 0.37.25 by @dependabot in https://github.com/dora-rs/dora/pull/364 +- Bump quinn-proto from 0.9.3 to 0.9.5 by @dependabot in https://github.com/dora-rs/dora/pull/357 +- Bump webpki from 0.22.1 to 0.22.2 by @dependabot in https://github.com/dora-rs/dora/pull/358 +- Update README discord link by @Felixhuangsiling in https://github.com/dora-rs/dora/pull/361 + +## New Contributors + +- @Felixhuangsiling made their first contribution in https://github.com/dora-rs/dora/pull/361 + +## v0.2.6 (2023-09-14) + +- Update dependencies to fix some security advisories by @phil-opp in https://github.com/dora-rs/dora/pull/354 + - Fixes `cargo install dora-daemon` + +## v0.2.5 (2023-09-06) + +### Features + +- Use cargo instead of git in Rust `Cargo.toml` template by @haixuanTao in https://github.com/dora-rs/dora/pull/326 +- Use read_line instead of next_line in stderr by @haixuanTao in https://github.com/dora-rs/dora/pull/325 +- Add a `rust-ros2-dataflow` example using the dora-ros2-bridge by @phil-opp in https://github.com/dora-rs/dora/pull/324 +- Removing patchelf by @haixuanTao in https://github.com/dora-rs/dora/pull/333 +- Improving python example readability by @haixuanTao in https://github.com/dora-rs/dora/pull/334 +- Use `serde_bytes` to serialize `Vec` by @haixuanTao in https://github.com/dora-rs/dora/pull/336 +- Adding support for `Arrow List(*)` for Python by @haixuanTao in https://github.com/dora-rs/dora/pull/337 +- Bump rustls-webpki from 0.100.1 to 0.100.2 by @dependabot in https://github.com/dora-rs/dora/pull/340 +- Add support for event stream merging for Python node API by @phil-opp in https://github.com/dora-rs/dora/pull/339 +- Merge `dora-ros2-bridge` by @phil-opp in https://github.com/dora-rs/dora/pull/341 +- Update dependencies by @phil-opp in https://github.com/dora-rs/dora/pull/345 +- Add support for arbitrary Arrow types in Python API by @phil-opp in https://github.com/dora-rs/dora/pull/343 +- Use typed inputs in Python ROS2 example by @phil-opp in https://github.com/dora-rs/dora/pull/346 +- Use struct type instead of array for ros2 messages by @haixuanTao in 
https://github.com/dora-rs/dora/pull/349 + +### Other + +- Add Discord :speech_balloon: by @haixuanTao in https://github.com/dora-rs/dora/pull/348 +- Small refactoring by @haixuanTao in https://github.com/dora-rs/dora/pull/342 + +## v0.2.4 (2023-07-18) + +### Features + +- Return dataflow result to CLI on `dora stop` by @phil-opp in https://github.com/dora-rs/dora/pull/300 +- Make dataflow descriptor available to Python nodes and operators by @phil-opp in https://github.com/dora-rs/dora/pull/301 +- Create a `CONTRIBUTING.md` guide by @phil-opp in https://github.com/dora-rs/dora/pull/307 +- Distribute prebuilt arm macos dora-rs by @haixuanTao in https://github.com/dora-rs/dora/pull/308 + +### Other + +- Fix the typos and add dora code branch by @meua in https://github.com/dora-rs/dora/pull/290 +- For consistency with other examples, modify python -> python3 by @meua in https://github.com/dora-rs/dora/pull/299 +- Add timestamps generated by hybrid logical clocks to all sent events by @phil-opp in https://github.com/dora-rs/dora/pull/302 +- Don't recompile the `dora-operator-api-c` crate on every build/run by @phil-opp in https://github.com/dora-rs/dora/pull/304 +- Remove deprecated `proc_macros` feature from `safer-ffi` dependency by @phil-opp in https://github.com/dora-rs/dora/pull/305 +- Update to Rust v1.70 by @phil-opp in https://github.com/dora-rs/dora/pull/303 +- Fix issue with not finding a custom nodes path by @haixuanTao in https://github.com/dora-rs/dora/pull/315 +- Implement `Stream` for `EventStream` by @phil-opp in https://github.com/dora-rs/dora/pull/309 +- Replace unmaintained `atty` crate with `std::io::IsTerminal` by @phil-opp in https://github.com/dora-rs/dora/pull/318 + +**Full Changelog**: https://github.com/dora-rs/dora/compare/v0.2.3...v0.2.4 + +## v0.2.3 (2023-05-24) + +## What's Changed + +- Check that coordinator, daemon, and node versions match by @phil-opp in https://github.com/dora-rs/dora/pull/245 +- Share events to Python without copying via `arrow` crate by @phil-opp in https://github.com/dora-rs/dora/pull/228 +- Upgrading the operator example to use `dora-arrow` by @haixuanTao in https://github.com/dora-rs/dora/pull/251 +- [Python] Show node name in process and put Traceback before the actual Error for more natural error by @haixuanTao in https://github.com/dora-rs/dora/pull/255 +- CLI: Improve error messages when coordinator is not running by @phil-opp in https://github.com/dora-rs/dora/pull/254 +- Integrate `dora-runtime` into `dora-daemon` by @phil-opp in https://github.com/dora-rs/dora/pull/257 +- Filter default log level at `warn` for `tokio::tracing` by @haixuanTao in https://github.com/dora-rs/dora/pull/269 +- Make log level filtering be `WARN` or below by @haixuanTao in https://github.com/dora-rs/dora/pull/274 +- Add support for distributed deployments with multiple daemons by @phil-opp in https://github.com/dora-rs/dora/pull/256 +- Provide a way to access logs through the CLI by @haixuanTao in https://github.com/dora-rs/dora/pull/259 +- Handle node errors during initialization phase by @phil-opp in https://github.com/dora-rs/dora/pull/275 +- Replace watchdog by asynchronous heartbeat messages by @phil-opp in https://github.com/dora-rs/dora/pull/278 +- Remove pyo3 in runtime and daemon as it generates `libpython` depende… by @haixuanTao in https://github.com/dora-rs/dora/pull/281 +- Release v0.2.3 with aarch64 support by @haixuanTao in https://github.com/dora-rs/dora/pull/279 + +## Fix + +- Fix yolov5 dependency issue by @haixuanTao in 
https://github.com/dora-rs/dora/pull/291 +- To solve this bug https://github.com/dora-rs/dora/issues/283, unify t… by @meua in https://github.com/dora-rs/dora/pull/285 +- Fix: Don't try to create two global tracing subscribers when using bundled runtime by @phil-opp in https://github.com/dora-rs/dora/pull/277 +- CI: Increase timeout for 'build CLI and binaries' step by @phil-opp in https://github.com/dora-rs/dora/pull/282 + +## Other + +- Update `pyo3` to `v0.18` by @phil-opp in https://github.com/dora-rs/dora/pull/246 +- Bump h2 from 0.3.13 to 0.3.17 by @dependabot in https://github.com/dora-rs/dora/pull/249 +- Add automatic issue labeler to organize opened issues by @haixuanTao in https://github.com/dora-rs/dora/pull/265 +- Allow the issue labeler to write issues by @phil-opp in https://github.com/dora-rs/dora/pull/272 +- Add a support matrix with planned feature to clarify dora status by @haixuanTao in https://github.com/dora-rs/dora/pull/264 + +**Full Changelog**: https://github.com/dora-rs/dora/compare/v0.2.2...v0.2.3 + +## v0.2.2 (2023-04-01) + +### Features + +- Make queue length configurable through the dataflow file by @phil-opp in https://github.com/dora-rs/dora/pull/231 +- Hot reloading Python Operator by @haixuanTao in https://github.com/dora-rs/dora/pull/239 +- Synchronize node and operator start by @phil-opp in https://github.com/dora-rs/dora/pull/236 +- Add opentelemetry capability at runtime instead of compile time by @haixuanTao in https://github.com/dora-rs/dora/pull/234 + +### Others + +- Wait on events and messages simultaneously to prevent queue buildup by @phil-opp in https://github.com/dora-rs/dora/pull/235 +- Fix looping in daemon listener loop by @phil-opp in https://github.com/dora-rs/dora/pull/244 +- Validate shell command as source and url source by @haixuanTao in https://github.com/dora-rs/dora/pull/243 +- Push error into the `init_done` channel for debugging context by @haixuanTao in https://github.com/dora-rs/dora/pull/238 +- Option communication config by @haixuanTao in https://github.com/dora-rs/dora/pull/241 +- Validate yaml when reading by @haixuanTao in https://github.com/dora-rs/dora/pull/237 + +**Full Changelog**: https://github.com/dora-rs/dora/compare/v0.2.1...v0.2.2 + +## v0.2.1 (2023-03-22) + +### Features + +- [Make dora-rs publishable on crates.io](https://github.com/dora-rs/dora/pull/211) + +### Fixes + +- [Avoid blocking the daemon main loop by using unbounded queue](https://github.com/dora-rs/dora/pull/230) +- [Inject YAML declared env variable into the runtime](https://github.com/dora-rs/dora/pull/227) +- [Use rustls instead of system SSL implementation](https://github.com/dora-rs/dora/pull/216) + +### Other + +- [Refactor python error](https://github.com/dora-rs/dora/pull/229) +- [The first letter of rust should be lowercase in the command](https://github.com/dora-rs/dora/pull/226) +- [Add documentation to the cli within the helper mode](https://github.com/dora-rs/dora/pull/225) +- [Update to safer-ffi v0.1.0-rc1](https://github.com/dora-rs/dora/pull/218) +- [remove unused variable: data_bytes](https://github.com/dora-rs/dora/pull/215) +- [Clean up: Remove workspace path](https://github.com/dora-rs/dora/pull/210) +- [Decouple opentelemetry from tracing](https://github.com/dora-rs/dora/pull/222) +- [Remove zenoh dependency from dora node API to speed up build](https://github.com/dora-rs/dora/pull/220) +- [Update to Rust v1.68](https://github.com/dora-rs/dora/pull/221) +- [Deny unknown fields to avoid 
typos](https://github.com/dora-rs/dora/pull/223) +- [Add an internal cli argument to create template with path dependencies](https://github.com/dora-rs/dora/pull/212) + +## v0.2.0 (2023-03-14) + +### Breaking + +- [Redesign: Create a `dora-daemon` as a communication broker](https://github.com/dora-rs/dora/pull/162) + - New `dora-daemon` executable that acts as a communication hub for all local nodes + - Large messages are passed through shared memory without any copying + - [Replaces the previous `iceoryx` communication layer](https://github.com/dora-rs/dora/pull/201) + - Small API change: Nodes and operators now receive _events_ instead of just inputs + - Inputs are one type of event + - Other supported events: `InputClosed` when an input stream is closed and `Stop` when the user stops the dataflow (e.g. through the CLI) + +### Features + +- Better Error handling when operator fails +- [Send small messages directly without shared memory](https://github.com/dora-rs/dora/pull/193) +- [Send all queued incoming events at once on `NextEvent` request](https://github.com/dora-rs/dora/pull/194) +- [Don't send replies for `SendMessage` requests when using TCP](https://github.com/dora-rs/dora/pull/195) +- [Allocate shared memory in nodes to improve throughput](https://github.com/dora-rs/dora/pull/200) + +### Fixes + +- [Manage node failure: Await all nodes to finish before marking dataflow as finished](https://github.com/dora-rs/dora/pull/183) + +### Other + +- [Use `DoraStatus` from dora library in template](https://github.com/dora-rs/dora/pull/182) +- [Simplify: Replace `library_filename` function with `format!` call](https://github.com/dora-rs/dora/pull/191) +- [Refactor Rust node API implementation](https://github.com/dora-rs/dora/pull/196) +- [Remove code duplicate for tracing subscriber and use env variable to manage log level.](https://github.com/dora-rs/dora/pull/197) +- [Add daemon to the release archive](https://github.com/dora-rs/dora/pull/199) +- [Remove `remove_dir_all` from `Cargo.lock`as it is vulnerable to a race condition according to dependabot](https://github.com/dora-rs/dora/pull/202) +- [Update the documentation to the new daemon format](https://github.com/dora-rs/dora/pull/198) +- [Removing legacy `libacl` which was required by Iceoryx](https://github.com/dora-rs/dora/pull/205) +- [Remove unimplemented CLI arguments for now](https://github.com/dora-rs/dora/pull/207) +- [Update zenoh to remove git dependencies](https://github.com/dora-rs/dora/pull/203) +- [Fix cli template to new daemon API](https://github.com/dora-rs/dora/pull/204) +- [Cleanup warnings](https://github.com/dora-rs/dora/pull/208) +- Dependency updates + +## v0.1.3 (2023-01-18) + +- Package `DoraStatus` into dora python package: https://github.com/dora-rs/dora/pull/172 +- Force removal of Pyo3 Object to avoid memory leak: https://github.com/dora-rs/dora/pull/168 +- Bump tokio from 1.21.2 to 1.23.1: https://github.com/dora-rs/dora/pull/171 +- Create a changelog file: https://github.com/dora-rs/dora/pull/174 + +## v0.1.2 (2022-12-15) + +- Fix infinite loop in the coordinator: https://github.com/dora-rs/dora/pull/155 +- Simplify the release process: https://github.com/dora-rs/dora/pull/157 +- Use generic linux distribution: https://github.com/dora-rs/dora/pull/159 + +## v0.1.1 (2022-12-05) + +This release contains fixes for: + +- Python linking using pypi release but also a redesigned python thread model within the runtime to avoid deadlock of the `GIL`. This also fix an issue with `patchelf`. 
+- A deployment separation for `ubuntu`, as the `20.04` and `22.04` versions of `dora` are incompatible.
+- Better tagging of the `dora` Rust API.
+
+## v0.1.0 (2022-11-15)
+
+This is our first release of `dora-rs`!
+
+The current release includes:
+
+- `dora-cli`, which enables creating, starting, and stopping dataflows.
+- `dora-coordinator`, which is our control plane.
+- `dora-runtime`, which manages the runtime of operators.
+- the `custom-nodes` API, which enables bridges from different languages.
diff --git a/NOTICE.md b/NOTICE.md
new file mode 100644
index 0000000000000000000000000000000000000000..e09a510e7635e4a5501db932628fb4c9615f88d3
--- /dev/null
+++ b/NOTICE.md
@@ -0,0 +1,7 @@
+## Copyright
+
+All content is the property of the respective authors or their employers. For more information regarding authorship of content, please consult the listed source code repository logs.
+
+## License
+
+This project is licensed under the Apache License, Version 2.0 ([LICENSE](LICENSE) or http://www.apache.org/licenses/LICENSE-2.0). Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be licensed as above, without any additional terms or conditions.
diff --git a/apis/c++/node/Cargo.toml b/apis/c++/node/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..ff27a2e88a4c0bbf816b039abc5aed84adc3e727
--- /dev/null
+++ b/apis/c++/node/Cargo.toml
@@ -0,0 +1,41 @@
+[package]
+name = "dora-node-api-cxx"
+version.workspace = true
+edition = "2021"
+documentation.workspace = true
+description.workspace = true
+license.workspace = true
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[lib]
+crate-type = ["staticlib"]
+
+[features]
+default = ["tracing"]
+tracing = ["dora-node-api/tracing"]
+ros2-bridge = [
+  "dep:dora-ros2-bridge",
+  "dep:dora-ros2-bridge-msg-gen",
+  "dep:rust-format",
+  "dep:prettyplease",
+  "dep:serde",
+  "dep:serde-big-array",
+]
+
+[dependencies]
+cxx = "1.0.73"
+dora-node-api = { workspace = true }
+eyre = "0.6.8"
+dora-ros2-bridge = { workspace = true, optional = true }
+futures-lite = { version = "2.2" }
+serde = { version = "1.0.164", features = ["derive"], optional = true }
+serde-big-array = { version = "0.5.1", optional = true }
+
+[build-dependencies]
+cxx-build = "1.0.73"
+dora-ros2-bridge-msg-gen = { workspace = true, optional = true }
+rust-format = { version = "0.3.4", features = [
+  "pretty_please",
+], optional = true }
+prettyplease = { version = "0.1", features = ["verbatim"], optional = true }
diff --git a/apis/c++/node/README.md b/apis/c++/node/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7b6b788f549a7a7a1d545ec7a8ef746f206b1aef
--- /dev/null
+++ b/apis/c++/node/README.md
@@ -0,0 +1,321 @@
+# Dora Node API for C++
+
+Dora supports nodes written in C++ through this API crate.
+
+## Build
+
+- Clone the `dora` repository:
+  ```bash
+  > git clone https://github.com/dora-rs/dora.git
+  > cd dora
+  ```
+- Build the `dora-node-api-cxx` package:
+  ```bash
+  cargo build --package dora-node-api-cxx
+  ```
+  - This will result in `dora-node-api.h` and `dora-node-api.cc` files in the `target/cxxbridge/dora-node-api-cxx` directory.
+- Include the `dora-node-api.h` header file in your source file.
+- Add the `dora-node-api.cc` file to your compile and link steps.
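+
+To sanity-check the setup, a minimal node source file might look like the following sketch (illustrative only, not part of the generated API; the functions it uses are described in the Usage section below):
+
+```c++
+#include "dora-node-api.h"
+
+int main()
+{
+    // register this process as a dora node (see "Init Dora Node" below)
+    auto dora_node = init_dora_node();
+
+    // drain events until dora closes all input streams (see "Receiving Events" below)
+    while (true)
+    {
+        auto event = dora_node.events->next();
+        if (event_type(event) == DoraEventType::AllInputsClosed)
+        {
+            break;
+        }
+    }
+
+    return 0;
+}
+```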
+
+### Build with ROS2 Bridge
+
+Dora features an experimental ROS2 Bridge that enables dora nodes to publish and subscribe to ROS2 topics.
+To enable the bridge, use these steps:
+
+- Clone the `dora` repository (see above).
+- Source the ROS2 setup files (see [ROS2 docs](https://docs.ros.org/en/rolling/Tutorials/Beginner-CLI-Tools/Configuring-ROS2-Environment.html#source-the-setup-files))
+- Optional: Source package-specific ROS2 setup files if you want to use custom package-specific ROS2 messages in the bridge (see [ROS2 docs](https://docs.ros.org/en/rolling/Tutorials/Beginner-Client-Libraries/Creating-Your-First-ROS2-Package.html#source-the-setup-file))
+- Build the `dora-node-api-cxx` package **with the `ros2-bridge` feature enabled**:
+  ```bash
+  cargo build --package dora-node-api-cxx --features ros2-bridge
+  ```
+  - In addition to the `dora-node-api.h` and `dora-node-api.cc` files, this will place a `dora-ros2-bindings.h` and a `dora-ros2-bindings.cc` file in the `target/cxxbridge/dora-node-api-cxx` directory.
+- Include both the `dora-node-api.h` and the `dora-ros2-bindings.h` header files in your source file.
+- Add the `dora-node-api.cc` and `dora-ros2-bindings.cc` files to your compile and link steps.
+
+## Usage
+
+The `dora-node-api.h` header provides various functions to interact with Dora.
+
+### Init Dora Node
+
+All nodes need to register themselves with Dora at startup.
+To do that, call the `init_dora_node()` function.
+The function returns a `DoraNode` instance, which gives access to dora events and enables sending Dora outputs.
+
+```c++
+auto dora_node = init_dora_node();
+```
+
+### Receiving Events
+
+The `dora_node.events` field is a stream of incoming events.
+To wait for the next incoming event, call `dora_node.events->next()`:
+
+```c++
+auto event = dora_node.events->next();
+```
+
+The `next` function returns an opaque `DoraEvent` type, which cannot be inspected from C++ directly.
+Instead, use the following functions to read and destructure the event:
+
+- `event_type(event)` returns a `DoraEventType`, which describes the kind of event. For example, an event could be an input or a stop instruction.
+  - When receiving a `DoraEventType::AllInputsClosed` event, the node should exit and not call `next` anymore.
+- Events of type `DoraEventType::Input` can be downcasted using `event_as_input`:
+  ```c++
+  auto input = event_as_input(std::move(event));
+  ```
+  The function returns a `DoraInput` instance, which has an `id` and a `data` field.
+  - The input `id` can be converted to a C++ string through `std::string(input.id)`.
+  - The `data` of inputs is currently of type [`rust::Vec<uint8_t>`](https://cxx.rs/binding/vec.html). Use the provided methods for reading or converting the data.
+  - **Note:** In the future, we plan to change the data type to the [Apache Arrow](https://arrow.apache.org/) data format to support typed inputs.
+
+### Sending Outputs
+
+Nodes can send outputs using the `send_output` function and the `dora_node.send_output` field.
+Note that all outputs need to be listed in the dataflow YAML declaration file; otherwise, an error will occur.
+
+**Example:**
+
+```c++
+// the data you want to send (NOTE: only byte vectors are supported right now)
+std::vector<uint8_t> out_vec{42};
+// create a Rust slice from the output vector
+rust::Slice<const uint8_t> out_slice{out_vec.data(), out_vec.size()};
+// send the slice as output
+auto result = send_output(dora_node.send_output, "output_id", out_slice);
+
+// check for errors
+auto error = std::string(result.error);
+if (!error.empty())
+{
+    std::cerr << "Error: " << error << std::endl;
+    return -1;
+}
+```
+
+## Using the ROS2 Bridge
+
+The `dora-ros2-bindings.h` header contains function and struct definitions that allow interacting with ROS2 nodes.
+Currently, the bridge supports publishing and subscribing to ROS2 topics.
+In the future, we plan to support ROS2 services and ROS2 actions as well.
+
+### Initializing the ROS2 Context
+
+The first step is to initialize a ROS2 context:
+
+```c++
+auto ros2_context = init_ros2_context();
+```
+
+### Creating Nodes
+
+After initializing a ROS2 context, you can use it to create ROS2 nodes:
+
+```c++
+auto node = ros2_context->new_node("/ros2_demo", "turtle_teleop");
+```
+
+The first argument is the namespace of the node and the second argument is its name.
+
+### Creating Topics
+
+After creating a node, you can use one of the `create_topic_<TYPE>` functions to create a topic on it.
+The `<TYPE>` part describes the message type that will be sent on the topic.
+The Dora ROS2 bridge automatically creates `create_topic_<TYPE>` functions for all message types found in the sourced ROS2 environment.
+
+```c++
+auto vel_topic = node->create_topic_geometry_msgs_Twist("/turtle1", "cmd_vel", qos_default());
+```
+
+The first argument is the namespace of the topic and the second argument is its name.
+The third argument is the QoS (quality of service) setting for the topic.
+It can be adjusted as desired, for example:
+
+```c++
+auto qos = qos_default();
+qos.durability = Ros2Durability::Volatile;
+qos.liveliness = Ros2Liveliness::Automatic;
+auto vel_topic = node->create_topic_geometry_msgs_Twist("/turtle1", "cmd_vel", qos);
+```
+
+### Publish
+
+After creating a topic, it is possible to publish messages on it.
+First, create a publisher:
+
+```c++
+auto vel_publisher = node->create_publisher(vel_topic, qos);
+```
+
+The returned publisher is typed by the chosen topic.
+It will only accept messages of the topic's type; otherwise, a compile error will occur.
+
+After creating a publisher, you can use the `publish` function to publish one or more messages.
+For example:
+
+```c++
+geometry_msgs::Twist twist = {
+    .linear = {.x = 1, .y = 0, .z = 0},
+    .angular = {.x = 0, .y = 0, .z = 0.5}
+};
+vel_publisher->publish(twist);
+```
+
+The `geometry_msgs::Twist` struct is automatically generated from the sourced ROS2 environment.
+Since the publisher is typed, its `publish` method only accepts `geometry_msgs::Twist` messages.
+
+### Subscriptions
+
+Subscribing to a topic is possible through the `create_subscription` function on nodes:
+
+```c++
+auto pose_topic = node->create_topic_turtlesim_Pose("/turtle1", "pose", qos_default());
+auto pose_subscription = node->create_subscription(pose_topic, qos_default(), event_stream);
+```
+
+The `topic` argument is the topic you want to subscribe to, created using a `create_topic_<TYPE>` function.
+The second argument is the quality of service setting, which can be customized as described above.
+
+The third parameter is the event stream that the received messages should be merged into (see the sketch below and the next section).
+Multiple subscriptions can be merged into the same event stream.
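+
+Putting these pieces together, a complete receive loop might look like the following sketch. This is illustrative only: it assumes the `node` created above and relies on the combined event stream (`dora_events_into_combined`, `is_dora`, `downcast_dora`) explained in the next section:
+
+```c++
+// create the combined event stream that subscriptions merge into (see next section)
+auto event_stream = dora_events_into_combined(std::move(dora_node.events));
+
+// subscribe to turtle poses as shown above
+auto pose_topic = node->create_topic_turtlesim_Pose("/turtle1", "pose", qos_default());
+auto pose_subscription = node->create_subscription(pose_topic, qos_default(), event_stream);
+
+while (true)
+{
+    auto event = event_stream.next();
+    if (pose_subscription->matches(event))
+    {
+        auto pose = pose_subscription->downcast(std::move(event));
+        std::cout << "turtle at x=" << pose.x << ", y=" << pose.y << std::endl;
+    }
+    else if (event.is_dora())
+    {
+        auto dora_event = downcast_dora(std::move(event));
+        if (event_type(dora_event) == DoraEventType::AllInputsClosed)
+        {
+            break; // dora closed all inputs, so stop polling
+        }
+        // handle other dora events (inputs, stop, ...) as described above
+    }
+}
+```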
+
+#### Combined Event Streams
+
+Combined event streams enable the merging of multiple event streams into one.
+The combined stream will then deliver messages from all sources, in order of arrival.
+
+You can create such an event stream from Dora's event stream using the `dora_events_into_combined` function:
+
+```c++
+auto event_stream = dora_events_into_combined(std::move(dora_node.events));
+```
+
+Alternatively, if you don't want to use Dora, you can also create an empty event stream:
+
+```c++
+auto event_stream = empty_combined_events();
+```
+
+**Note:** You should only use `empty_combined_events` if you're running your executable independently of Dora.
+Ignoring the events from the `dora_node.events` channel can result in unintended behavior.
+
+#### Receiving Messages from Combined Event Stream
+
+The merged event stream will receive all incoming events of the node, including Dora events and ROS2 messages.
+To wait for the next incoming event, use its `next` method:
+
+```c++
+auto event = event_stream.next();
+```
+
+This returns an `event` instance of type `CombinedEvent`, which can be downcasted to Dora events or ROS2 messages.
+To handle an event, you should check its type and then downcast it:
+
+- To check for a Dora event, you can use the `is_dora()` function. If it returns `true`, you can downcast the combined event to a Dora event using the `downcast_dora` function.
+- ROS2 subscriptions support a `matches` function to check whether a combined event is an instance of the respective ROS2 subscription. If it returns `true`, you can downcast the event to the respective ROS2 message struct using the subscription's `downcast` function.
+
+**Example:**
+
+```c++
+if (event.is_dora())
+{
+    auto dora_event = downcast_dora(std::move(event));
+    // handle dora_event as described above
+    auto ty = event_type(dora_event);
+    if (ty == DoraEventType::Input)
+    {
+        auto input = event_as_input(std::move(dora_event));
+        // etc
+    }
+    // .. else if
+}
+else if (pose_subscription->matches(event))
+{
+    auto pose = pose_subscription->downcast(std::move(event));
+    std::cout << "Received pose x:" << pose.x << ", y:" << pose.y << std::endl;
+}
+else
+{
+    std::cout << "received unexpected event" << std::endl;
+}
+```
+
+### Constants
+
+Some ROS2 message definitions define constants, e.g. to specify the values of an enum-like integer field.
+The Dora ROS2 bridge exposes these constants in the generated bindings as functions.
+
+For example, the `STATUS_NO_FIX` constant of the [`NavSatStatus` message](https://docs.ros.org/en/jade/api/sensor_msgs/html/msg/NavSatStatus.html) can be accessed as follows:
+
+```c++
+assert((sensor_msgs::const_NavSatStatus_STATUS_NO_FIX() == -1));
+```
+
+(Note: Exposing them as C++ constants is not possible because it's [not supported by `cxx` yet](https://github.com/dtolnay/cxx/issues/1051).)
+
+### Service Clients
+
+To create a service client, use one of the `create_client_<TYPE>` functions.
+The `<TYPE>` part describes the service type, which specifies the request and response types.
+The Dora ROS2 bridge automatically creates `create_client_<TYPE>` functions for all service types found in the sourced ROS2 environment.
+
+```c++
+auto add_two_ints = node->create_client_example_interfaces_AddTwoInts(
+    "/",
+    "add_two_ints",
+    qos,
+    merged_events
+);
+```
+
+- The first argument is the namespace of the service and the second argument is its name.
+- The third argument is the QoS (quality of service) setting for the service.
+ It can be set to `qos_default()` or adjusted as desired, for example: + ```c++ + auto qos = qos_default(); + qos.reliable = true; + qos.max_blocking_time = 0.1; + qos.keep_last = 1; + ``` +- The last argument is the [combined event stream](#combined-event-streams) that the received service responses should be merged into. + +#### Waiting for the Service + +In order to achieve reliable service communication, it is recommended to wait until the service is available before sending requests. +Use the `wait_for_service()` method for that, e.g.: + +```c++ +add_two_ints->wait_for_service(node) +``` + +The given `node` must be the node on which the service was created. + +#### Sending Requests + +To send a request, use the `send_request` method: + +```c++ +add_two_ints->send_request(request); +``` + +The method sends the request asynchronously without waiting for a response. +When the response is received, it is automatically sent to the [combined event stream](#combined-event-streams) that was given on client creation. + +#### Receiving Responses + +See the [_"Receiving Messages from Combined Event Stream"_](#receiving-messages-from-combined-event-stream) section for how to receive events from the combined event stream. +To check if a received event is a service response, use the `matches` method. +If it returns `true`, you can use the `downcast` method to convert the event to the correct service response type. + +Example: + +```c++ +if (add_two_ints->matches(event)) +{ + auto response = add_two_ints->downcast(std::move(event)); + std::cout << "Received sum response with value " << response.sum << std::endl; + ... +} +``` diff --git a/apis/c++/node/build.rs b/apis/c++/node/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..6ca8f623705149e51fa8f783308e93077941c485 --- /dev/null +++ b/apis/c++/node/build.rs @@ -0,0 +1,159 @@ +use std::path::{Path, PathBuf}; + +fn main() { + let mut bridge_files = vec![PathBuf::from("src/lib.rs")]; + #[cfg(feature = "ros2-bridge")] + bridge_files.push(ros2::generate()); + + let _build = cxx_build::bridges(&bridge_files); + println!("cargo:rerun-if-changed=src/lib.rs"); + + // rename header files + let src_dir = target_dir() + .join("cxxbridge") + .join("dora-node-api-cxx") + .join("src"); + let target_dir = src_dir.parent().unwrap(); + std::fs::copy(src_dir.join("lib.rs.h"), target_dir.join("dora-node-api.h")).unwrap(); + std::fs::copy( + src_dir.join("lib.rs.cc"), + target_dir.join("dora-node-api.cc"), + ) + .unwrap(); + + #[cfg(feature = "ros2-bridge")] + ros2::generate_ros2_message_header(bridge_files.last().unwrap()); + + // to avoid unnecessary `mut` warning + bridge_files.clear(); +} + +fn target_dir() -> PathBuf { + std::env::var("CARGO_TARGET_DIR") + .map(PathBuf::from) + .unwrap_or_else(|_| { + let root = Path::new(env!("CARGO_MANIFEST_DIR")) + .ancestors() + .nth(3) + .unwrap(); + root.join("target") + }) +} + +#[cfg(feature = "ros2-bridge")] +mod ros2 { + use super::target_dir; + use std::{ + io::{BufRead, BufReader}, + path::{Component, Path, PathBuf}, + }; + + pub fn generate() -> PathBuf { + use rust_format::Formatter; + let paths = ament_prefix_paths(); + let generated = dora_ros2_bridge_msg_gen::gen(paths.as_slice(), true); + let generated_string = rust_format::PrettyPlease::default() + .format_tokens(generated) + .unwrap(); + let out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap()); + let target_file = out_dir.join("ros2_bindings.rs"); + std::fs::write(&target_file, generated_string).unwrap(); + println!( + 
"cargo:rustc-env=ROS2_BINDINGS_PATH={}", + target_file.display() + ); + + target_file + } + + fn ament_prefix_paths() -> Vec { + let ament_prefix_path: String = match std::env::var("AMENT_PREFIX_PATH") { + Ok(path) => path, + Err(std::env::VarError::NotPresent) => { + println!("cargo:warning='AMENT_PREFIX_PATH not set'"); + String::new() + } + Err(std::env::VarError::NotUnicode(s)) => { + panic!( + "AMENT_PREFIX_PATH is not valid unicode: `{}`", + s.to_string_lossy() + ); + } + }; + println!("cargo:rerun-if-env-changed=AMENT_PREFIX_PATH"); + + let paths: Vec<_> = ament_prefix_path.split(':').map(PathBuf::from).collect(); + for path in &paths { + println!("cargo:rerun-if-changed={}", path.display()); + } + + paths + } + + pub fn generate_ros2_message_header(source_file: &Path) { + use std::io::Write as _; + + let out_dir = source_file.parent().unwrap(); + let relative_path = local_relative_path(&source_file) + .ancestors() + .nth(2) + .unwrap() + .join("out"); + let header_path = out_dir + .join("cxxbridge") + .join("include") + .join("dora-node-api-cxx") + .join(&relative_path) + .join("ros2_bindings.rs.h"); + let code_path = out_dir + .join("cxxbridge") + .join("sources") + .join("dora-node-api-cxx") + .join(&relative_path) + .join("ros2_bindings.rs.cc"); + + // copy message files to target directory + let target_path = target_dir() + .join("cxxbridge") + .join("dora-node-api-cxx") + .join("dora-ros2-bindings.h"); + + std::fs::copy(&header_path, &target_path).unwrap(); + println!("cargo:rerun-if-changed={}", header_path.display()); + + let node_header = + std::fs::File::open(target_path.with_file_name("dora-node-api.h")).unwrap(); + let mut code_file = std::fs::File::open(&code_path).unwrap(); + println!("cargo:rerun-if-changed={}", code_path.display()); + let mut code_target_file = + std::fs::File::create(target_path.with_file_name("dora-ros2-bindings.cc")).unwrap(); + + // copy both the node header and the code file to prevent import errors + let mut header_reader = { + let mut reader = BufReader::new(node_header); + + // read first line to skip `#pragma once`, which is not allowed in main files + let mut first_line = String::new(); + reader.read_line(&mut first_line).unwrap(); + assert_eq!(first_line.trim(), "#pragma once"); + + reader + }; + std::io::copy(&mut header_reader, &mut code_target_file).unwrap(); + std::io::copy(&mut code_file, &mut code_target_file).unwrap(); + code_target_file.flush().unwrap(); + } + + // copy from cxx-build source + fn local_relative_path(path: &Path) -> PathBuf { + let mut rel_path = PathBuf::new(); + for component in path.components() { + match component { + Component::Prefix(_) | Component::RootDir | Component::CurDir => {} + Component::ParentDir => drop(rel_path.pop()), // noop if empty + Component::Normal(name) => rel_path.push(name), + } + } + rel_path + } +} diff --git a/apis/c++/node/src/lib.rs b/apis/c++/node/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..e3476de87626c0f325179ac35c3d5bba3ac436a8 --- /dev/null +++ b/apis/c++/node/src/lib.rs @@ -0,0 +1,225 @@ +use std::any::Any; + +use dora_node_api::{ + self, + arrow::array::{AsArray, BinaryArray}, + merged::{MergeExternal, MergedEvent}, + Event, EventStream, +}; +use eyre::bail; + +#[cfg(feature = "ros2-bridge")] +use dora_ros2_bridge::{_core, ros2_client}; +use futures_lite::{stream, Stream, StreamExt}; + +#[cxx::bridge] +#[allow(clippy::needless_lifetimes)] +mod ffi { + struct DoraNode { + events: Box, + send_output: Box, + } + + pub enum DoraEventType { + 
Stop, + Input, + InputClosed, + Error, + Unknown, + AllInputsClosed, + } + + struct DoraInput { + id: String, + data: Vec, + } + + struct DoraResult { + error: String, + } + + pub struct CombinedEvents { + events: Box, + } + + pub struct CombinedEvent { + event: Box, + } + + extern "Rust" { + type Events; + type OutputSender; + type DoraEvent; + type MergedEvents; + type MergedDoraEvent; + + fn init_dora_node() -> Result; + + fn dora_events_into_combined(events: Box) -> CombinedEvents; + fn empty_combined_events() -> CombinedEvents; + fn next(self: &mut Events) -> Box; + fn next_event(events: &mut Box) -> Box; + fn event_type(event: &Box) -> DoraEventType; + fn event_as_input(event: Box) -> Result; + fn send_output( + output_sender: &mut Box, + id: String, + data: &[u8], + ) -> DoraResult; + + fn next(self: &mut CombinedEvents) -> CombinedEvent; + + fn is_dora(self: &CombinedEvent) -> bool; + fn downcast_dora(event: CombinedEvent) -> Result>; + } +} + +#[cfg(feature = "ros2-bridge")] +pub mod ros2 { + pub use dora_ros2_bridge::*; + include!(env!("ROS2_BINDINGS_PATH")); +} + +fn init_dora_node() -> eyre::Result { + let (node, events) = dora_node_api::DoraNode::init_from_env()?; + let events = Events(events); + let send_output = OutputSender(node); + + Ok(ffi::DoraNode { + events: Box::new(events), + send_output: Box::new(send_output), + }) +} + +pub struct Events(EventStream); + +impl Events { + fn next(&mut self) -> Box { + Box::new(DoraEvent(self.0.recv())) + } +} + +fn next_event(events: &mut Box) -> Box { + events.next() +} + +fn dora_events_into_combined(events: Box) -> ffi::CombinedEvents { + let events = events.0.map(MergedEvent::Dora); + ffi::CombinedEvents { + events: Box::new(MergedEvents { + events: Some(Box::new(events)), + next_id: 1, + }), + } +} + +fn empty_combined_events() -> ffi::CombinedEvents { + ffi::CombinedEvents { + events: Box::new(MergedEvents { + events: Some(Box::new(stream::empty())), + next_id: 1, + }), + } +} + +pub struct DoraEvent(Option); + +fn event_type(event: &DoraEvent) -> ffi::DoraEventType { + match &event.0 { + Some(event) => match event { + Event::Stop => ffi::DoraEventType::Stop, + Event::Input { .. } => ffi::DoraEventType::Input, + Event::InputClosed { .. 
} => ffi::DoraEventType::InputClosed, + Event::Error(_) => ffi::DoraEventType::Error, + _ => ffi::DoraEventType::Unknown, + }, + None => ffi::DoraEventType::AllInputsClosed, + } +} + +fn event_as_input(event: Box) -> eyre::Result { + let Some(Event::Input { + id, + metadata: _, + data, + }) = event.0 + else { + bail!("not an input event"); + }; + let data: Option<&BinaryArray> = data.as_binary_opt(); + Ok(ffi::DoraInput { + id: id.into(), + data: data.map(|d| d.value(0).to_owned()).unwrap_or_default(), + }) +} + +pub struct OutputSender(dora_node_api::DoraNode); + +fn send_output(sender: &mut Box, id: String, data: &[u8]) -> ffi::DoraResult { + let result = sender + .0 + .send_output_raw(id.into(), Default::default(), data.len(), |out| { + out.copy_from_slice(data) + }); + let error = match result { + Ok(()) => String::new(), + Err(err) => format!("{err:?}"), + }; + ffi::DoraResult { error } +} + +pub struct MergedEvents { + events: Option> + Unpin>>, + next_id: u32, +} + +impl MergedEvents { + fn next(&mut self) -> MergedDoraEvent { + let event = futures_lite::future::block_on(self.events.as_mut().unwrap().next()); + MergedDoraEvent(event) + } + + pub fn merge(&mut self, events: impl Stream> + Unpin + 'static) -> u32 { + let id = self.next_id; + self.next_id += 1; + let events = Box::pin(events.map(move |event| ExternalEvent { event, id })); + + let inner = self.events.take().unwrap(); + let merged: Box + Unpin + 'static> = + Box::new(inner.merge_external(events).map(|event| match event { + MergedEvent::Dora(event) => MergedEvent::Dora(event), + MergedEvent::External(event) => MergedEvent::External(event.flatten()), + })); + self.events = Some(merged); + + id + } +} + +impl ffi::CombinedEvents { + fn next(&mut self) -> ffi::CombinedEvent { + ffi::CombinedEvent { + event: Box::new(self.events.next()), + } + } +} + +pub struct MergedDoraEvent(Option>); + +pub struct ExternalEvent { + pub event: Box, + pub id: u32, +} + +impl ffi::CombinedEvent { + fn is_dora(&self) -> bool { + matches!(&self.event.0, Some(MergedEvent::Dora(_))) + } +} + +fn downcast_dora(event: ffi::CombinedEvent) -> eyre::Result> { + match event.event.0 { + Some(MergedEvent::Dora(event)) => Ok(Box::new(DoraEvent(Some(event)))), + _ => eyre::bail!("not an external event"), + } +} diff --git a/apis/c++/operator/Cargo.toml b/apis/c++/operator/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..32d998cf8c188873bfc53ade02682903c2af5210 --- /dev/null +++ b/apis/c++/operator/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "dora-operator-api-cxx" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +[lib] +crate-type = ["staticlib"] + +[dependencies] +cxx = "1.0.73" +dora-operator-api = { workspace = true } + +[build-dependencies] +cxx-build = "1.0.73" diff --git a/apis/c++/operator/build.rs b/apis/c++/operator/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..7b1aa53b5fb5ca8cb13db7776b62555f40f82566 --- /dev/null +++ b/apis/c++/operator/build.rs @@ -0,0 +1,4 @@ +fn main() { + let _ = cxx_build::bridge("src/lib.rs"); + println!("cargo:rerun-if-changed=src/lib.rs"); +} diff --git a/apis/c++/operator/src/lib.rs b/apis/c++/operator/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..4ea7bab37affd525246d4d55981af7484370dfc8 --- /dev/null +++ b/apis/c++/operator/src/lib.rs @@ -0,0 +1,98 @@ +#![cfg(not(test))] +#![warn(unsafe_op_in_unsafe_fn)] + +use dora_operator_api::{ + 
self, register_operator, DoraOperator, DoraOutputSender, DoraStatus, Event, IntoArrow, +}; +use ffi::DoraSendOutputResult; + +#[cxx::bridge] +#[allow(unsafe_op_in_unsafe_fn)] +mod ffi { + struct DoraOnInputResult { + error: String, + stop: bool, + } + + struct DoraSendOutputResult { + error: String, + } + + extern "Rust" { + type OutputSender<'a, 'b>; + + fn send_output(sender: &mut OutputSender, id: &str, data: &[u8]) -> DoraSendOutputResult; + } + + unsafe extern "C++" { + include!("operator.h"); + + type Operator; + + fn new_operator() -> UniquePtr; + + fn on_input( + op: Pin<&mut Operator>, + id: &str, + data: &[u8], + output_sender: &mut OutputSender, + ) -> DoraOnInputResult; + } +} + +pub struct OutputSender<'a, 'b>(&'a mut DoraOutputSender<'b>); + +fn send_output(sender: &mut OutputSender, id: &str, data: &[u8]) -> DoraSendOutputResult { + let error = sender + .0 + .send(id.into(), data.to_owned().into_arrow()) + .err() + .unwrap_or_default(); + DoraSendOutputResult { error } +} + +register_operator!(OperatorWrapper); + +struct OperatorWrapper { + operator: cxx::UniquePtr, +} + +impl Default for OperatorWrapper { + fn default() -> Self { + Self { + operator: ffi::new_operator(), + } + } +} + +impl DoraOperator for OperatorWrapper { + fn on_event( + &mut self, + event: &Event, + output_sender: &mut DoraOutputSender, + ) -> Result { + match event { + Event::Input { id, data } => { + let operator = self.operator.as_mut().unwrap(); + let mut output_sender = OutputSender(output_sender); + let data: &[u8] = data + .try_into() + .map_err(|err| format!("expected byte array: {err}"))?; + + let result = ffi::on_input(operator, id, data, &mut output_sender); + if result.error.is_empty() { + Ok(match result.stop { + false => DoraStatus::Continue, + true => DoraStatus::Stop, + }) + } else { + Err(result.error) + } + } + _ => { + // ignore other events for now + Ok(DoraStatus::Continue) + } + } + } +} diff --git a/apis/c/node/Cargo.toml b/apis/c/node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..2a601c643988633e988058f1f23588eab10fce1e --- /dev/null +++ b/apis/c/node/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "dora-node-api-c" +version.workspace = true +edition = "2021" + +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[lib] +crate-type = ["staticlib", "lib"] + + +[features] +default = ["tracing"] +tracing = ["dora-node-api/tracing"] + +[dependencies] +eyre = "0.6.8" +tracing = "0.1.33" +arrow-array = { workspace = true } + +[dependencies.dora-node-api] +workspace = true diff --git a/apis/c/node/node_api.h b/apis/c/node/node_api.h new file mode 100644 index 0000000000000000000000000000000000000000..5089087696287f6be8e82121f417d1da14306dd3 --- /dev/null +++ b/apis/c/node/node_api.h @@ -0,0 +1,22 @@ +#include + +void *init_dora_context_from_env(); +void free_dora_context(void *dora_context); + +void *dora_next_event(void *dora_context); +void free_dora_event(void *dora_event); + +enum DoraEventType +{ + DoraEventType_Stop, + DoraEventType_Input, + DoraEventType_InputClosed, + DoraEventType_Error, + DoraEventType_Unknown, +}; +enum DoraEventType read_dora_event_type(void *dora_event); + +void read_dora_input_id(void *dora_event, char **out_ptr, size_t *out_len); +void read_dora_input_data(void *dora_event, char **out_ptr, size_t *out_len); + +int dora_send_output(void *dora_context, char *id_ptr, size_t id_len, char 
*data_ptr, size_t data_len); diff --git a/apis/c/node/src/lib.rs b/apis/c/node/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..d8885b85a4dd41d03b7b81f3278fe5a4e21f4783 --- /dev/null +++ b/apis/c/node/src/lib.rs @@ -0,0 +1,260 @@ +#![deny(unsafe_op_in_unsafe_fn)] + +use arrow_array::UInt8Array; +use dora_node_api::{arrow::array::AsArray, DoraNode, Event, EventStream}; +use eyre::Context; +use std::{ffi::c_void, ptr, slice}; + +pub const HEADER_NODE_API: &str = include_str!("../node_api.h"); + +struct DoraContext { + node: &'static mut DoraNode, + events: EventStream, +} + +/// Initializes a dora context from the environment variables that were set by +/// the dora-coordinator. +/// +/// Returns a pointer to the dora context on success. This pointer can be +/// used to call dora API functions that expect a `context` argument. Any +/// other use is prohibited. To free the dora context when it is no longer +/// needed, use the [`free_dora_context`] function. +/// +/// On error, a null pointer is returned. +#[no_mangle] +pub extern "C" fn init_dora_context_from_env() -> *mut c_void { + let context = || { + let (node, events) = DoraNode::init_from_env()?; + let node = Box::leak(Box::new(node)); + Result::<_, eyre::Report>::Ok(DoraContext { node, events }) + }; + let context = match context().context("failed to initialize node") { + Ok(n) => n, + Err(err) => { + let err: eyre::Error = err; + tracing::error!("{err:?}"); + return ptr::null_mut(); + } + }; + + Box::into_raw(Box::new(context)).cast() +} + +/// Frees the given dora context. +/// +/// ## Safety +/// +/// Only pointers created through [`init_dora_context_from_env`] are allowed +/// as arguments. Each context pointer must be freed exactly once. After +/// freeing, the pointer must not be used anymore. +#[no_mangle] +pub unsafe extern "C" fn free_dora_context(context: *mut c_void) { + let context: Box = unsafe { Box::from_raw(context.cast()) }; + // drop all fields except for `node` + let DoraContext { node, .. } = *context; + // convert the `'static` reference back to a Box, then drop it + let _ = unsafe { Box::from_raw(node as *const DoraNode as *mut DoraNode) }; +} + +/// Waits for the next incoming event for the node. +/// +/// Returns a pointer to the event on success. This pointer must not be used +/// directly. Instead, use the `read_dora_event_*` functions to read out the +/// type and payload of the event. When the event is not needed anymore, use +/// [`free_dora_event`] to free it again. +/// +/// Returns a null pointer when all event streams were closed. This means that +/// no more event will be available. Nodes typically react by stopping. +/// +/// ## Safety +/// +/// The `context` argument must be a dora context created through +/// [`init_dora_context_from_env`]. The context must be still valid, i.e., not +/// freed yet. +#[no_mangle] +pub unsafe extern "C" fn dora_next_event(context: *mut c_void) -> *mut c_void { + let context: &mut DoraContext = unsafe { &mut *context.cast() }; + match context.events.recv() { + Some(event) => Box::into_raw(Box::new(event)).cast(), + None => ptr::null_mut(), + } +} + +/// Reads out the type of the given event. +/// +/// ## Safety +/// +/// The `event` argument must be a dora event received through +/// [`dora_next_event`]. The event must be still valid, i.e., not +/// freed yet. 
+#[no_mangle] +pub unsafe extern "C" fn read_dora_event_type(event: *const ()) -> EventType { + let event: &Event = unsafe { &*event.cast() }; + match event { + Event::Stop => EventType::Stop, + Event::Input { .. } => EventType::Input, + Event::InputClosed { .. } => EventType::InputClosed, + Event::Error(_) => EventType::Error, + _ => EventType::Unknown, + } +} + +#[repr(C)] +pub enum EventType { + Stop, + Input, + InputClosed, + Error, + Unknown, +} + +/// Reads out the ID of the given input event. +/// +/// Writes the `out_ptr` and `out_len` with the start pointer and length of the +/// ID string of the input. The ID is guaranteed to be valid UTF-8. +/// +/// Writes a null pointer and length `0` if the given event is not an input event. +/// +/// ## Safety +/// +/// The `event` argument must be a dora event received through +/// [`dora_next_event`]. The event must be still valid, i.e., not +/// freed yet. The returned `out_ptr` must not be used after +/// freeing the `event`, since it points directly into the event's +/// memory. +#[no_mangle] +pub unsafe extern "C" fn read_dora_input_id( + event: *const (), + out_ptr: *mut *const u8, + out_len: *mut usize, +) { + let event: &Event = unsafe { &*event.cast() }; + match event { + Event::Input { id, .. } => { + let id = id.as_str().as_bytes(); + let ptr = id.as_ptr(); + let len = id.len(); + unsafe { + *out_ptr = ptr; + *out_len = len; + } + } + _ => unsafe { + *out_ptr = ptr::null(); + *out_len = 0; + }, + } +} + +/// Reads out the data of the given input event. +/// +/// Writes the `out_ptr` and `out_len` with the start pointer and length of the +/// input's data array. The data array is a raw byte array, whose format +/// depends on the source operator/node. +/// +/// Writes a null pointer and length `0` if the given event is not an input event +/// or when an input event has no associated data. +/// +/// ## Safety +/// +/// The `event` argument must be a dora event received through +/// [`dora_next_event`]. The event must be still valid, i.e., not +/// freed yet. The returned `out_ptr` must not be used after +/// freeing the `event`, since it points directly into the event's +/// memory. +#[no_mangle] +pub unsafe extern "C" fn read_dora_input_data( + event: *const (), + out_ptr: *mut *const u8, + out_len: *mut usize, +) { + let event: &Event = unsafe { &*event.cast() }; + match event { + Event::Input { data, metadata, .. } => match metadata.type_info.data_type { + dora_node_api::arrow::datatypes::DataType::UInt8 => { + let array: &UInt8Array = data.as_primitive(); + let ptr = array.values().as_ptr(); + unsafe { + *out_ptr = ptr; + *out_len = metadata.type_info.len; + } + } + dora_node_api::arrow::datatypes::DataType::Null => unsafe { + *out_ptr = ptr::null(); + *out_len = 0; + }, + _ => { + todo!("dora C++ Node does not yet support higher level type of arrow. Only UInt8. + The ultimate solution should be based on arrow FFI interface. Feel free to contribute :)") + } + }, + _ => unsafe { + *out_ptr = ptr::null(); + *out_len = 0; + }, + } +} + +/// Frees the given dora event. +/// +/// ## Safety +/// +/// Only pointers created through [`dora_next_event`] are allowed +/// as arguments. Each context pointer must be freed exactly once. After +/// freeing, the pointer and all derived pointers must not be used anymore. +/// This also applies to the `read_dora_event_*` functions, which return +/// pointers into the original event structure. 
+#[no_mangle] +pub unsafe extern "C" fn free_dora_event(event: *mut c_void) { + let _: Box = unsafe { Box::from_raw(event.cast()) }; +} + +/// Sends the given output to subscribed dora nodes/operators. +/// +/// The `id_ptr` and `id_len` fields must be the start pointer and length of an +/// UTF8-encoded string. The ID string must correspond to one of the node's +/// outputs specified in the dataflow YAML file. +/// +/// The `data_ptr` and `data_len` fields must be the start pointer and length +/// a byte array. The dora API sends this data as-is, without any processing. +/// +/// ## Safety +/// +/// - The `id_ptr` and `id_len` fields must be the start pointer and length of an +/// UTF8-encoded string. +/// - The `data_ptr` and `data_len` fields must be the start pointer and length +/// a byte array. +#[no_mangle] +pub unsafe extern "C" fn dora_send_output( + context: *mut c_void, + id_ptr: *const u8, + id_len: usize, + data_ptr: *const u8, + data_len: usize, +) -> isize { + match unsafe { try_send_output(context, id_ptr, id_len, data_ptr, data_len) } { + Ok(()) => 0, + Err(err) => { + tracing::error!("{err:?}"); + -1 + } + } +} + +unsafe fn try_send_output( + context: *mut c_void, + id_ptr: *const u8, + id_len: usize, + data_ptr: *const u8, + data_len: usize, +) -> eyre::Result<()> { + let context: &mut DoraContext = unsafe { &mut *context.cast() }; + let id = std::str::from_utf8(unsafe { slice::from_raw_parts(id_ptr, id_len) })?; + let output_id = id.to_owned().into(); + let data = unsafe { slice::from_raw_parts(data_ptr, data_len) }; + context + .node + .send_output_raw(output_id, Default::default(), data.len(), |out| { + out.copy_from_slice(data); + }) +} diff --git a/apis/c/operator/Cargo.toml b/apis/c/operator/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..ddac79eded47206e9a4c1a0414076262e69d86c9 --- /dev/null +++ b/apis/c/operator/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "dora-operator-api-c" +version.workspace = true +edition = "2021" +description = "C API implementation for Dora Operator" +documentation.workspace = true +license.workspace = true + +[lib] +crate-type = ["staticlib", "lib"] + +[dependencies] +dora-operator-api-types = { workspace = true } + +[build-dependencies] +dora-operator-api-types = { workspace = true } diff --git a/apis/c/operator/build.rs b/apis/c/operator/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..934146399d051b251c471785060882a8f2922947 --- /dev/null +++ b/apis/c/operator/build.rs @@ -0,0 +1,11 @@ +use std::path::Path; + +fn main() { + dora_operator_api_types::generate_headers(Path::new("operator_types.h")) + .expect("failed to create operator_types.h"); + + // don't rebuild on changes (otherwise we rebuild on every run as we're + // writing the `operator_types.h` file; cargo will still rerun this script + // when the `dora_operator_api_types` crate changes) + println!("cargo:rerun-if-changed=build.rs"); +} diff --git a/apis/c/operator/operator_api.h b/apis/c/operator/operator_api.h new file mode 100644 index 0000000000000000000000000000000000000000..24f57ab277ed80165f999e42d2293f2a0e7d3f97 --- /dev/null +++ b/apis/c/operator/operator_api.h @@ -0,0 +1,36 @@ +#ifndef __RUST_DORA_OPERATOR_API_C_WRAPPER__ +#define __RUST_DORA_OPERATOR_API_C_WRAPPER__ +#ifdef __cplusplus +extern "C" +{ +#endif + +#include +#include "operator_types.h" + +#ifdef _WIN32 +#define EXPORT __declspec(dllexport) +#else +#define EXPORT __attribute__((visibility("default"))) +#endif + + EXPORT DoraInitResult_t 
dora_init_operator(void); + + EXPORT DoraResult_t dora_drop_operator(void *operator_context); + + EXPORT OnEventResult_t dora_on_event( + RawEvent_t *event, + const SendOutput_t *send_output, + void *operator_context); + + static void __dora_type_assertions() + { + DoraInitOperator_t __dora_init_operator = {.init_operator = dora_init_operator}; + DoraDropOperator_t __dora_drop_operator = {.drop_operator = dora_drop_operator}; + DoraOnEvent_t __dora_on_event = {.on_event = dora_on_event}; + } +#ifdef __cplusplus +} /* extern \"C\" */ +#endif + +#endif /* __RUST_DORA_OPERATOR_API_C_WRAPPER__ */ diff --git a/apis/c/operator/operator_types.h b/apis/c/operator/operator_types.h new file mode 100644 index 0000000000000000000000000000000000000000..9cf2f3d21bc87fbaa48eaa0ca952f4d17735d4ac --- /dev/null +++ b/apis/c/operator/operator_types.h @@ -0,0 +1,180 @@ +/*! \file */ +/******************************************* + * * + * File auto-generated by `::safer_ffi`. * + * * + * Do not manually edit this file. * + * * + *******************************************/ + +#ifndef __RUST_DORA_OPERATOR_API_C__ +#define __RUST_DORA_OPERATOR_API_C__ +#ifdef __cplusplus +extern "C" { +#endif + + +#include +#include + +/** \brief + * Same as [`Vec`][`rust::Vec`], but with guaranteed `#[repr(C)]` layout + */ +typedef struct Vec_uint8 { + /** */ + uint8_t * ptr; + + /** */ + size_t len; + + /** */ + size_t cap; +} Vec_uint8_t; + +/** */ +typedef struct DoraResult { + /** */ + Vec_uint8_t * error; +} DoraResult_t; + +/** */ +typedef struct DoraDropOperator { + /** */ + DoraResult_t (*drop_operator)(void *); +} DoraDropOperator_t; + +/** */ +typedef struct DoraInitResult { + /** */ + DoraResult_t result; + + /** */ + void * operator_context; +} DoraInitResult_t; + +/** */ +typedef struct DoraInitOperator { + /** */ + DoraInitResult_t (*init_operator)(void); +} DoraInitOperator_t; + +/** */ +/** \remark Has the same ABI as `uint8_t` **/ +#ifdef DOXYGEN +typedef +#endif +enum DoraStatus { + /** */ + DORA_STATUS_CONTINUE = 0, + /** */ + DORA_STATUS_STOP = 1, + /** */ + DORA_STATUS_STOP_ALL = 2, +} +#ifndef DOXYGEN +; typedef uint8_t +#endif +DoraStatus_t; + +/** */ +typedef struct OnEventResult { + /** */ + DoraResult_t result; + + /** */ + DoraStatus_t status; +} OnEventResult_t; + +/** */ +typedef struct Input Input_t; + + +#include + +/** */ +typedef struct RawEvent { + /** */ + Input_t * input; + + /** */ + Vec_uint8_t input_closed; + + /** */ + bool stop; + + /** */ + Vec_uint8_t error; +} RawEvent_t; + +/** */ +typedef struct Output Output_t; + +/** \brief + * `Arc Ret>` + */ +typedef struct ArcDynFn1_DoraResult_Output { + /** */ + void * env_ptr; + + /** */ + DoraResult_t (*call)(void *, Output_t); + + /** */ + void (*release)(void *); + + /** */ + void (*retain)(void *); +} ArcDynFn1_DoraResult_Output_t; + +/** */ +typedef struct SendOutput { + /** */ + ArcDynFn1_DoraResult_Output_t send_output; +} SendOutput_t; + +/** */ +typedef struct DoraOnEvent { + /** */ + OnEventResult_t (*on_event)(RawEvent_t *, SendOutput_t const *, void *); +} DoraOnEvent_t; + +/** */ +typedef struct Metadata { + /** */ + Vec_uint8_t open_telemetry_context; +} Metadata_t; + +/** */ +void +dora_free_data ( + Vec_uint8_t _data); + +/** */ +void +dora_free_input_id ( + char * _input_id); + +/** */ +Vec_uint8_t +dora_read_data ( + Input_t * input); + +/** */ +char * +dora_read_input_id ( + Input_t const * input); + +/** */ +DoraResult_t +dora_send_operator_output ( + SendOutput_t const * send_output, + char const * id, + uint8_t const * 
data_ptr, + size_t data_len); + + +#ifdef __cplusplus +} /* extern \"C\" */ +#endif + +#endif /* __RUST_DORA_OPERATOR_API_C__ */ diff --git a/apis/c/operator/src/lib.rs b/apis/c/operator/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..9b6f8ea5a8c2c81d8bd3569e256fd08ac4711cc5 --- /dev/null +++ b/apis/c/operator/src/lib.rs @@ -0,0 +1,4 @@ +pub const HEADER_OPERATOR_API: &str = include_str!("../operator_api.h"); +pub const HEADER_OPERATOR_TYPES: &str = include_str!("../operator_types.h"); + +pub use dora_operator_api_types; diff --git a/apis/python/node/Cargo.toml b/apis/python/node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..6e05782ab0f4d3fedc256f8d865e3015d2f26156 --- /dev/null +++ b/apis/python/node/Cargo.toml @@ -0,0 +1,31 @@ +[package] +version.workspace = true +name = "dora-node-api-python" +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[features] +default = ["tracing", "telemetry"] +tracing = ["dora-node-api/tracing"] +telemetry = ["dora-runtime/telemetry"] + +[dependencies] +dora-node-api = { workspace = true } +dora-operator-api-python = { workspace = true } +pyo3 = { workspace = true, features = ["eyre", "abi3-py37"] } +eyre = "0.6" +serde_yaml = "0.8.23" +flume = "0.10.14" +dora-runtime = { workspace = true, features = ["tracing", "metrics", "python"] } +arrow = { workspace = true, features = ["pyarrow"] } +pythonize = { workspace = true } +futures = "0.3.28" +dora-ros2-bridge-python = { workspace = true } + +[lib] +name = "dora" +crate-type = ["cdylib"] diff --git a/apis/python/node/README.md b/apis/python/node/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1faffc7756e05aa8833613ebe3273f323206d72b --- /dev/null +++ b/apis/python/node/README.md @@ -0,0 +1,21 @@ +This crate corresponds to the Node API for Dora. + +## Building + +To build the Python module for development: + +```bash +python -m venv .env +source .env/bin/activate +pip install maturin +maturin develop +``` + +## Type hinting + +Type hinting requires to run a second step + +```bash +python generate_stubs.py dora dora/__init__.pyi +maturin develop +``` diff --git a/apis/python/node/dora/__init__.py b/apis/python/node/dora/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7269bda14af3690fbeda2377fc1ffc2b5952a145 --- /dev/null +++ b/apis/python/node/dora/__init__.py @@ -0,0 +1,42 @@ +""" +# dora-rs +This is the dora python client for interacting with dora dataflow. +You can install it via: +```bash +pip install dora-rs +``` +""" + +from enum import Enum + +from .dora import * + +from .dora import ( + Node, + PyEvent, + Ros2Context, + Ros2Node, + Ros2NodeOptions, + Ros2Topic, + Ros2Publisher, + Ros2Subscription, + start_runtime, + __version__, + __author__, + Ros2QosPolicies, + Ros2Durability, + Ros2Liveliness, +) + + +class DoraStatus(Enum): + """Dora status to indicate if operator `on_input` loop + should be stopped. + Args: + Enum (u8): Status signaling to dora operator to + stop or continue the operator. 
+ """ + + CONTINUE = 0 + STOP = 1 + STOP_ALL = 2 diff --git a/apis/python/node/dora/__init__.pyi b/apis/python/node/dora/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3dc33676364d0780904dab940109bcae81992efb --- /dev/null +++ b/apis/python/node/dora/__init__.pyi @@ -0,0 +1,320 @@ +import dora +import pyarrow +import typing + +@typing.final +class Enum: + """Generic enumeration. + +Derive from this class to define new enumerations.""" + __members__: mappingproxy = ... + +@typing.final +class Node: + """The custom node API lets you integrate `dora` into your application. +It allows you to retrieve input and send output in any fashion you want. + +Use with: + +```python +from dora import Node + +node = Node() +```""" + + def __init__(self) -> None: + """The custom node API lets you integrate `dora` into your application. +It allows you to retrieve input and send output in any fashion you want. + +Use with: + +```python +from dora import Node + +node = Node() +```""" + + def dataflow_descriptor(self) -> dict: + """Returns the full dataflow descriptor that this node is part of. + +This method returns the parsed dataflow YAML file.""" + + def dataflow_id(self) -> str: + """Returns the dataflow id.""" + + def merge_external_events(self, subscription: dora.Ros2Subscription) -> None: + """Merge an external event stream with dora main loop. +This currently only work with ROS2.""" + + def next(self, timeout: float=None) -> dora.PyEvent: + """`.next()` gives you the next input that the node has received. +It blocks until the next event becomes available. +You can use timeout in seconds to return if no input is available. +It will return `None` when all senders has been dropped. + +```python +event = node.next() +``` + +You can also iterate over the event stream with a loop + +```python +for event in node: +match event["type"]: +case "INPUT": +match event["id"]: +case "image": +```""" + + def send_output(self, output_id: str, data: pyarrow.Array, metadata: dict=None) -> None: + """`send_output` send data from the node. + +```python +Args: +output_id: str, +data: pyarrow.Array, +metadata: Option[Dict], +``` + +ex: + +```python +node.send_output("string", b"string", {"open_telemetry_context": "7632e76"}) +```""" + + def __iter__(self) -> typing.Any: + """Implement iter(self).""" + + def __next__(self) -> typing.Any: + """Implement next(self).""" + +@typing.final +class PyEvent: + """Dora Event""" + + def inner(self):... + + def __getitem__(self, key: typing.Any) -> typing.Any: + """Return self[key].""" + +@typing.final +class Ros2Context: + """ROS2 Context holding all messages definition for receiving and sending messages to ROS2. + +By default, Ros2Context will use env `AMENT_PREFIX_PATH` to search for message definition. + +AMENT_PREFIX_PATH folder structure should be the following: + +- For messages: /msg/.msg +- For services: /srv/.srv + +You can also use `ros_paths` if you don't want to use env variable. + +warning:: +dora Ros2 bridge functionality is considered **unstable**. It may be changed +at any point without it being considered a breaking change. + +```python +context = Ros2Context() +```""" + + def __init__(self, ros_paths: typing.List[str]=None) -> None: + """ROS2 Context holding all messages definition for receiving and sending messages to ROS2. + +By default, Ros2Context will use env `AMENT_PREFIX_PATH` to search for message definition. 
+ +AMENT_PREFIX_PATH folder structure should be the following: + +- For messages: <namespace>/msg/<name>.msg +- For services: <namespace>/srv/<name>.srv + +You can also use `ros_paths` if you don't want to use the env variable. + +warning:: +dora Ros2 bridge functionality is considered **unstable**. It may be changed +at any point without it being considered a breaking change. + +```python +context = Ros2Context() +```""" + + def new_node(self, name: str, namespace: str, options: dora.Ros2NodeOptions) -> dora.Ros2Node: + """Create a new ROS2 node + +```python +ros2_node = ros2_context.new_node( +"turtle_teleop", +"/ros2_demo", +Ros2NodeOptions(rosout=True), +) +``` + +warning:: +dora Ros2 bridge functionality is considered **unstable**. It may be changed +at any point without it being considered a breaking change.""" + +@typing.final +class Ros2Durability: + """DDS 2.2.3.4 DURABILITY""" + + def __eq__(self, value: typing.Any) -> bool: + """Return self==value.""" + + def __ge__(self, value: typing.Any) -> bool: + """Return self>=value.""" + + def __gt__(self, value: typing.Any) -> bool: + """Return self>value.""" + + def __int__(self) -> None: + """int(self)""" + + def __le__(self, value: typing.Any) -> bool: + """Return self<=value.""" + + def __lt__(self, value: typing.Any) -> bool: + """Return self<value.""" + + def __ne__(self, value: typing.Any) -> bool: + """Return self!=value.""" + + def __repr__(self) -> str: + """Return repr(self).""" + Persistent: Ros2Durability = ... + Transient: Ros2Durability = ... + TransientLocal: Ros2Durability = ... + Volatile: Ros2Durability = ... + +@typing.final +class Ros2Liveliness: + """DDS 2.2.3.11 LIVELINESS""" + + def __eq__(self, value: typing.Any) -> bool: + """Return self==value.""" + + def __ge__(self, value: typing.Any) -> bool: + """Return self>=value.""" + + def __gt__(self, value: typing.Any) -> bool: + """Return self>value.""" + + def __int__(self) -> None: + """int(self)""" + + def __le__(self, value: typing.Any) -> bool: + """Return self<=value.""" + + def __lt__(self, value: typing.Any) -> bool: + """Return self<value.""" + + def __ne__(self, value: typing.Any) -> bool: + """Return self!=value.""" + + def __repr__(self) -> str: + """Return repr(self).""" + Automatic: Ros2Liveliness = ... + ManualByParticipant: Ros2Liveliness = ... + ManualByTopic: Ros2Liveliness = ... + +@typing.final +class Ros2Node: + """ROS2 Node + +warnings:: +- dora Ros2 bridge functionality is considered **unstable**. It may be changed +at any point without it being considered a breaking change. +- There's a known issue about ROS2 nodes not being discoverable by ROS2 +See: https://github.com/jhelovuo/ros2-client/issues/4""" + + def create_publisher(self, topic: dora.Ros2Topic, qos: dora.Ros2QosPolicies=None) -> dora.Ros2Publisher: + """Create a ROS2 publisher + +```python +pose_publisher = ros2_node.create_publisher(turtle_pose_topic) +``` +warnings: +- dora Ros2 bridge functionality is considered **unstable**. It may be changed +at any point without it being considered a breaking change.""" + + def create_subscription(self, topic: dora.Ros2Topic, qos: dora.Ros2QosPolicies=None) -> dora.Ros2Subscription: + """Create a ROS2 subscription + +```python +pose_reader = ros2_node.create_subscription(turtle_pose_topic) +``` + +warnings: +- dora Ros2 bridge functionality is considered **unstable**. It may be changed +at any point without it being considered a breaking change.""" + + def create_topic(self, name: str, message_type: str, qos: dora.Ros2QosPolicies) -> dora.Ros2Topic: + """Create a ROS2 topic to connect to.
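+
+The `message_type` uses the ROS2 `package/Type` form, e.g. `geometry_msgs/Twist` in the example below.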
+ +```python +turtle_twist_topic = ros2_node.create_topic( +"/turtle1/cmd_vel", "geometry_msgs/Twist", topic_qos +) +```""" + +@typing.final +class Ros2NodeOptions: + """ROS2 Node Options""" + + def __init__(self, rosout: bool=None) -> None: + """ROS2 Node Options""" + +@typing.final +class Ros2Publisher: + """ROS2 Publisher + +warnings: +- dora Ros2 bridge functionality is considered **unstable**. It may be changed +at any point without it being considered a breaking change.""" + + def publish(self, data: pyarrow.Array) -> None: + """Publish a message into ROS2 topic. + +Remember that the data format should respect the structure of the ROS2 message using an arrow Structure. + +ex: +```python +gripper_command.publish( +pa.array( +[ +{ +"name": "gripper", +"cmd": np.float32(5), +} +] +), +) +```""" + +@typing.final +class Ros2QosPolicies: + """ROS2 QoS Policy""" + + def __init__(self, durability: dora.Ros2Durability=None, liveliness: dora.Ros2Liveliness=None, reliable: bool=None, keep_all: bool=None, lease_duration: float=None, max_blocking_time: float=None, keep_last: int=None) -> dora.Ros2QoSPolicies: + """ROS2 QoS Policy""" + +@typing.final +class Ros2Subscription: + """ROS2 Subscription + + +warnings: +- dora Ros2 bridge functionality is considered **unstable**. It may be changed +at any point without it being considered a breaking change.""" + + def next(self):... + +@typing.final +class Ros2Topic: + """ROS2 Topic + +warnings: +- dora Ros2 bridge functionality is considered **unstable**. It may be changed +at any point without it being considered a breaking change.""" + +def start_runtime() -> None: + """Start a runtime for Operators""" \ No newline at end of file diff --git a/apis/python/node/dora/__pycache__/__init__.cpython-38.pyc b/apis/python/node/dora/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5aba9557d795eb7d6dd5da66ee8f898cc7d50020 Binary files /dev/null and b/apis/python/node/dora/__pycache__/__init__.cpython-38.pyc differ diff --git a/apis/python/node/generate_stubs.py b/apis/python/node/generate_stubs.py new file mode 100644 index 0000000000000000000000000000000000000000..db9e3f83b67b2bbcbce14236c51928b95db89574 --- /dev/null +++ b/apis/python/node/generate_stubs.py @@ -0,0 +1,517 @@ +import argparse +import ast +import importlib +import inspect +import logging +import re +import subprocess +from functools import reduce +from typing import Any, Dict, List, Mapping, Optional, Set, Tuple, Union + + +def path_to_type(*elements: str) -> ast.AST: + base: ast.AST = ast.Name(id=elements[0], ctx=ast.Load()) + for e in elements[1:]: + base = ast.Attribute(value=base, attr=e, ctx=ast.Load()) + return base + + +OBJECT_MEMBERS = dict(inspect.getmembers(object)) +BUILTINS: Dict[str, Union[None, Tuple[List[ast.AST], ast.AST]]] = { + "__annotations__": None, + "__bool__": ([], path_to_type("bool")), + "__bytes__": ([], path_to_type("bytes")), + "__class__": None, + "__contains__": ([path_to_type("typing", "Any")], path_to_type("bool")), + "__del__": None, + "__delattr__": ([path_to_type("str")], path_to_type("None")), + "__delitem__": ([path_to_type("typing", "Any")], path_to_type("typing", "Any")), + "__dict__": None, + "__dir__": None, + "__doc__": None, + "__eq__": ([path_to_type("typing", "Any")], path_to_type("bool")), + "__format__": ([path_to_type("str")], path_to_type("str")), + "__ge__": ([path_to_type("typing", "Any")], path_to_type("bool")), + "__getattribute__": ([path_to_type("str")], path_to_type("typing", "Any")), 
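+    # Each entry maps a dunder name to ([parameter types], return type), used as a
+    # fallback when the runtime docstring carries no `:type:`/`:rtype:` annotations;
+    # entries whose value is None are skipped entirely during stub generation.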
+ "__getitem__": ([path_to_type("typing", "Any")], path_to_type("typing", "Any")), + "__gt__": ([path_to_type("typing", "Any")], path_to_type("bool")), + "__hash__": ([], path_to_type("int")), + "__init__": ([], path_to_type("None")), + "__init_subclass__": None, + "__iter__": ([], path_to_type("typing", "Any")), + "__le__": ([path_to_type("typing", "Any")], path_to_type("bool")), + "__len__": ([], path_to_type("int")), + "__lt__": ([path_to_type("typing", "Any")], path_to_type("bool")), + "__module__": None, + "__ne__": ([path_to_type("typing", "Any")], path_to_type("bool")), + "__new__": None, + "__next__": ([], path_to_type("typing", "Any")), + "__int__": ([], path_to_type("None")), + "__reduce__": None, + "__reduce_ex__": None, + "__repr__": ([], path_to_type("str")), + "__setattr__": ( + [path_to_type("str"), path_to_type("typing", "Any")], + path_to_type("None"), + ), + "__setitem__": ( + [path_to_type("typing", "Any"), path_to_type("typing", "Any")], + path_to_type("typing", "Any"), + ), + "__sizeof__": None, + "__str__": ([], path_to_type("str")), + "__subclasshook__": None, +} + + +def module_stubs(module: Any) -> ast.Module: + types_to_import = {"typing"} + classes = [] + functions = [] + for member_name, member_value in inspect.getmembers(module): + element_path = [module.__name__, member_name] + if member_name.startswith("__"): + pass + elif member_name.startswith("DoraStatus"): + pass + elif inspect.isclass(member_value): + classes.append( + class_stubs(member_name, member_value, element_path, types_to_import) + ) + elif inspect.isbuiltin(member_value): + functions.append( + function_stub( + member_name, + member_value, + element_path, + types_to_import, + in_class=False, + ) + ) + else: + logging.warning(f"Unsupported root construction {member_name}") + return ast.Module( + body=[ast.Import(names=[ast.alias(name=t)]) for t in sorted(types_to_import)] + + classes + + functions, + type_ignores=[], + ) + + +def class_stubs( + cls_name: str, cls_def: Any, element_path: List[str], types_to_import: Set[str] +) -> ast.ClassDef: + attributes: List[ast.AST] = [] + methods: List[ast.AST] = [] + magic_methods: List[ast.AST] = [] + constants: List[ast.AST] = [] + for member_name, member_value in inspect.getmembers(cls_def): + current_element_path = [*element_path, member_name] + if member_name == "__init__": + try: + inspect.signature(cls_def) # we check it actually exists + methods = [ + function_stub( + member_name, + cls_def, + current_element_path, + types_to_import, + in_class=True, + ), + *methods, + ] + except ValueError as e: + if "no signature found" not in str(e): + raise ValueError( + f"Error while parsing signature of {cls_name}.__init_" + ) from e + elif ( + member_value == OBJECT_MEMBERS.get(member_name) + or BUILTINS.get(member_name, ()) is None + ): + pass + elif inspect.isdatadescriptor(member_value): + attributes.extend( + data_descriptor_stub( + member_name, member_value, current_element_path, types_to_import + ) + ) + elif inspect.isroutine(member_value): + (magic_methods if member_name.startswith("__") else methods).append( + function_stub( + member_name, + member_value, + current_element_path, + types_to_import, + in_class=True, + ) + ) + elif member_name == "__match_args__": + constants.append( + ast.AnnAssign( + target=ast.Name(id=member_name, ctx=ast.Store()), + annotation=ast.Subscript( + value=path_to_type("tuple"), + slice=ast.Tuple( + elts=[path_to_type("str"), ast.Ellipsis()], ctx=ast.Load() + ), + ctx=ast.Load(), + ), + value=ast.Constant(member_value), + 
simple=1, + ) + ) + elif member_value is not None: + constants.append( + ast.AnnAssign( + target=ast.Name(id=member_name, ctx=ast.Store()), + annotation=concatenated_path_to_type( + member_value.__class__.__name__, element_path, types_to_import + ), + value=ast.Ellipsis(), + simple=1, + ) + ) + else: + logging.warning( + f"Unsupported member {member_name} of class {'.'.join(element_path)}" + ) + + doc = inspect.getdoc(cls_def) + doc_comment = build_doc_comment(doc) if doc else None + return ast.ClassDef( + cls_name, + bases=[], + keywords=[], + body=( + ([doc_comment] if doc_comment else []) + + attributes + + methods + + magic_methods + + constants + ) + or [ast.Ellipsis()], + decorator_list=[path_to_type("typing", "final")], + ) + + +def data_descriptor_stub( + data_desc_name: str, + data_desc_def: Any, + element_path: List[str], + types_to_import: Set[str], +) -> Union[Tuple[ast.AnnAssign, ast.Expr], Tuple[ast.AnnAssign]]: + annotation = None + doc_comment = None + + doc = inspect.getdoc(data_desc_def) + if doc is not None: + annotation = returns_stub(data_desc_name, doc, element_path, types_to_import) + m = re.findall(r"^ *:return: *(.*) *$", doc, re.MULTILINE) + if len(m) == 1: + doc_comment = m[0] + elif len(m) > 1: + raise ValueError( + f"Multiple return annotations found with :return: in {'.'.join(element_path)} documentation" + ) + + assign = ast.AnnAssign( + target=ast.Name(id=data_desc_name, ctx=ast.Store()), + annotation=annotation or path_to_type("typing", "Any"), + simple=1, + ) + doc_comment = build_doc_comment(doc_comment) if doc_comment else None + return (assign, doc_comment) if doc_comment else (assign,) + + +def function_stub( + fn_name: str, + fn_def: Any, + element_path: List[str], + types_to_import: Set[str], + *, + in_class: bool, +) -> ast.FunctionDef: + body: List[ast.AST] = [] + doc = inspect.getdoc(fn_def) + if doc is not None: + doc_comment = build_doc_comment(doc) + if doc_comment is not None: + body.append(doc_comment) + + decorator_list = [] + if in_class and hasattr(fn_def, "__self__"): + decorator_list.append(ast.Name("staticmethod")) + + return ast.FunctionDef( + fn_name, + arguments_stub(fn_name, fn_def, doc or "", element_path, types_to_import), + body or [ast.Ellipsis()], + decorator_list=decorator_list, + returns=( + returns_stub(fn_name, doc, element_path, types_to_import) if doc else None + ), + lineno=0, + ) + + +def arguments_stub( + callable_name: str, + callable_def: Any, + doc: str, + element_path: List[str], + types_to_import: Set[str], +) -> ast.arguments: + real_parameters: Mapping[str, inspect.Parameter] = inspect.signature( + callable_def + ).parameters + if callable_name == "__init__": + real_parameters = { + "self": inspect.Parameter("self", inspect.Parameter.POSITIONAL_ONLY), + **real_parameters, + } + + parsed_param_types = {} + optional_params = set() + + # Types for magic functions types + builtin = BUILTINS.get(callable_name) + if isinstance(builtin, tuple): + param_names = list(real_parameters.keys()) + if param_names and param_names[0] == "self": + del param_names[0] + for name, t in zip(param_names, builtin[0]): + parsed_param_types[name] = t + + # Types from comment + for match in re.findall( + r"^ *:type *([a-zA-Z0-9_]+): ([^\n]*) *$", doc, re.MULTILINE + ): + if match[0] not in real_parameters: + raise ValueError( + f"The parameter {match[0]} of {'.'.join(element_path)} " + "is defined in the documentation but not in the function signature" + ) + type = match[1] + if type.endswith(", optional"): + optional_params.add(match[0]) 
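+            # strip the trailing ", optional" marker (10 characters) so that only
+            # the bare type expression is parsed below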
+ type = type[:-10] + parsed_param_types[match[0]] = convert_type_from_doc( + type, element_path, types_to_import + ) + + # we parse the parameters + posonlyargs = [] + args = [] + vararg = None + kwonlyargs = [] + kw_defaults = [] + kwarg = None + defaults = [] + for param in real_parameters.values(): + if param.name != "self" and param.name not in parsed_param_types: + raise ValueError( + f"The parameter {param.name} of {'.'.join(element_path)} " + "has no type definition in the function documentation" + ) + param_ast = ast.arg( + arg=param.name, annotation=parsed_param_types.get(param.name) + ) + + default_ast = None + if param.default != param.empty: + default_ast = ast.Constant(param.default) + if param.name not in optional_params: + raise ValueError( + f"Parameter {param.name} of {'.'.join(element_path)} " + "is optional according to the type but not flagged as such in the doc" + ) + elif param.name in optional_params: + raise ValueError( + f"Parameter {param.name} of {'.'.join(element_path)} " + "is optional according to the documentation but has no default value" + ) + + if param.kind == param.POSITIONAL_ONLY: + args.append(param_ast) + # posonlyargs.append(param_ast) + # defaults.append(default_ast) + elif param.kind == param.POSITIONAL_OR_KEYWORD: + args.append(param_ast) + defaults.append(default_ast) + elif param.kind == param.VAR_POSITIONAL: + vararg = param_ast + elif param.kind == param.KEYWORD_ONLY: + kwonlyargs.append(param_ast) + kw_defaults.append(default_ast) + elif param.kind == param.VAR_KEYWORD: + kwarg = param_ast + + return ast.arguments( + posonlyargs=posonlyargs, + args=args, + vararg=vararg, + kwonlyargs=kwonlyargs, + kw_defaults=kw_defaults, + defaults=defaults, + kwarg=kwarg, + ) + + +def returns_stub( + callable_name: str, doc: str, element_path: List[str], types_to_import: Set[str] +) -> Optional[ast.AST]: + m = re.findall(r"^ *:rtype: *([^\n]*) *$", doc, re.MULTILINE) + if len(m) == 0: + builtin = BUILTINS.get(callable_name) + if isinstance(builtin, tuple) and builtin[1] is not None: + return builtin[1] + raise ValueError( + f"The return type of {'.'.join(element_path)} " + "has no type definition using :rtype: in the function documentation" + ) + if len(m) > 1: + raise ValueError( + f"Multiple return type annotations found with :rtype: for {'.'.join(element_path)}" + ) + return convert_type_from_doc(m[0], element_path, types_to_import) + + +def convert_type_from_doc( + type_str: str, element_path: List[str], types_to_import: Set[str] +) -> ast.AST: + type_str = type_str.strip() + return parse_type_to_ast(type_str, element_path, types_to_import) + + +def parse_type_to_ast( + type_str: str, element_path: List[str], types_to_import: Set[str] +) -> ast.AST: + # let's tokenize + tokens = [] + current_token = "" + for c in type_str: + if "a" <= c <= "z" or "A" <= c <= "Z" or c == ".": + current_token += c + else: + if current_token: + tokens.append(current_token) + current_token = "" + if c != " ": + tokens.append(c) + if current_token: + tokens.append(current_token) + + # let's first parse nested parenthesis + stack: List[List[Any]] = [[]] + for token in tokens: + if token == "[": + children: List[str] = [] + stack[-1].append(children) + stack.append(children) + elif token == "]": + stack.pop() + else: + stack[-1].append(token) + + # then it's easy + def parse_sequence(sequence: List[Any]) -> ast.AST: + # we split based on "or" + or_groups: List[List[str]] = [[]] + print(sequence) + # TODO: Fix sequence + if "Ros" in sequence and "2" in sequence: + sequence = 
["".join(sequence)] + elif "dora.Ros" in sequence and "2" in sequence: + sequence = ["".join(sequence)] + + for e in sequence: + if e == "or": + or_groups.append([]) + else: + or_groups[-1].append(e) + if any(not g for g in or_groups): + raise ValueError( + f"Not able to parse type '{type_str}' used by {'.'.join(element_path)}" + ) + + new_elements: List[ast.AST] = [] + for group in or_groups: + if len(group) == 1 and isinstance(group[0], str): + new_elements.append( + concatenated_path_to_type(group[0], element_path, types_to_import) + ) + elif ( + len(group) == 2 + and isinstance(group[0], str) + and isinstance(group[1], list) + ): + new_elements.append( + ast.Subscript( + value=concatenated_path_to_type( + group[0], element_path, types_to_import + ), + slice=parse_sequence(group[1]), + ctx=ast.Load(), + ) + ) + else: + raise ValueError( + f"Not able to parse type '{type_str}' used by {'.'.join(element_path)}" + ) + return reduce( + lambda left, right: ast.BinOp(left=left, op=ast.BitOr(), right=right), + new_elements, + ) + + return parse_sequence(stack[0]) + + +def concatenated_path_to_type( + path: str, element_path: List[str], types_to_import: Set[str] +) -> ast.AST: + parts = path.split(".") + if any(not p for p in parts): + raise ValueError( + f"Not able to parse type '{path}' used by {'.'.join(element_path)}" + ) + if len(parts) > 1: + types_to_import.add(".".join(parts[:-1])) + return path_to_type(*parts) + + +def build_doc_comment(doc: str) -> Optional[ast.Expr]: + lines = [line.strip() for line in doc.split("\n")] + clean_lines = [] + for line in lines: + if line.startswith((":type", ":rtype")): + continue + clean_lines.append(line) + text = "\n".join(clean_lines).strip() + return ast.Expr(value=ast.Constant(text)) if text else None + + +def format_with_ruff(file: str) -> None: + subprocess.check_call(["python", "-m", "ruff", "format", file]) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Extract Python type stub from a python module." 
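+        # typical invocation (see this package's README):
+        #   python generate_stubs.py dora dora/__init__.pyi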
+ ) + parser.add_argument( + "module_name", help="Name of the Python module for which generate stubs" + ) + parser.add_argument( + "out", + help="Name of the Python stub file to write to", + type=argparse.FileType("wt"), + ) + parser.add_argument( + "--ruff", help="Formats the generated stubs using Ruff", action="store_true" + ) + args = parser.parse_args() + stub_content = ast.unparse(module_stubs(importlib.import_module(args.module_name))) + args.out.write(stub_content) + if args.ruff: + format_with_ruff(args.out.name) diff --git a/apis/python/node/pyproject.toml b/apis/python/node/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..a7c3a22a295865334e58b1503e81eb1c22ee544a --- /dev/null +++ b/apis/python/node/pyproject.toml @@ -0,0 +1,11 @@ +[build-system] +requires = ["maturin>=0.13.2"] +build-backend = "maturin" + +[project] +name = "dora-rs" +# Install pyarrow at the same time of dora-rs +dependencies = ['pyarrow'] + +[tool.maturin] +features = ["pyo3/extension-module"] diff --git a/apis/python/node/src/lib.rs b/apis/python/node/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ba9c6c46fa046d9740b686ec6f127e896f4e0781 --- /dev/null +++ b/apis/python/node/src/lib.rs @@ -0,0 +1,264 @@ +#![allow(clippy::borrow_deref_ref)] // clippy warns about code generated by #[pymethods] + +use std::time::Duration; + +use arrow::pyarrow::{FromPyArrow, ToPyArrow}; +use dora_node_api::merged::{MergeExternalSend, MergedEvent}; +use dora_node_api::{DoraNode, EventStream}; +use dora_operator_api_python::{pydict_to_metadata, PyEvent}; +use dora_ros2_bridge_python::Ros2Subscription; +use eyre::Context; +use futures::{Stream, StreamExt}; +use pyo3::prelude::*; +use pyo3::types::{PyBytes, PyDict}; + +/// The custom node API lets you integrate `dora` into your application. +/// It allows you to retrieve input and send output in any fashion you want. +/// +/// Use with: +/// +/// ```python +/// from dora import Node +/// +/// node = Node() +/// ``` +/// +#[pyclass] +pub struct Node { + events: Events, + node: DoraNode, +} + +#[pymethods] +impl Node { + #[new] + pub fn new() -> eyre::Result { + let (node, events) = DoraNode::init_from_env()?; + + Ok(Node { + events: Events::Dora(events), + node, + }) + } + + /// `.next()` gives you the next input that the node has received. + /// It blocks until the next event becomes available. + /// You can use timeout in seconds to return if no input is available. + /// It will return `None` when all senders has been dropped. 
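+    /// For example, `node.next(timeout=0.1)` would wait at most 100 ms for the
+    /// next event (the timeout is converted with `Duration::from_secs_f32`).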
+ /// + /// ```python + /// event = node.next() + /// ``` + /// + /// You can also iterate over the event stream with a loop + /// + /// ```python + /// for event in node: + /// match event["type"]: + /// case "INPUT": + /// match event["id"]: + /// case "image": + /// ``` + /// + /// :type timeout: float, optional + /// :rtype: dora.PyEvent + #[allow(clippy::should_implement_trait)] + pub fn next(&mut self, py: Python, timeout: Option) -> PyResult> { + let event = py.allow_threads(|| self.events.recv(timeout.map(Duration::from_secs_f32))); + Ok(event) + } + + /// You can iterate over the event stream with a loop + /// + /// ```python + /// for event in node: + /// match event["type"]: + /// case "INPUT": + /// match event["id"]: + /// case "image": + /// ``` + /// + /// :rtype: dora.PyEvent + pub fn __next__(&mut self, py: Python) -> PyResult> { + let event = py.allow_threads(|| self.events.recv(None)); + Ok(event) + } + + /// You can iterate over the event stream with a loop + /// + /// ```python + /// for event in node: + /// match event["type"]: + /// case "INPUT": + /// match event["id"]: + /// case "image": + /// ``` + /// + /// :rtype: dora.PyEvent + fn __iter__(slf: PyRef<'_, Self>) -> PyRef<'_, Self> { + slf + } + + /// `send_output` send data from the node. + /// + /// ```python + /// Args: + /// output_id: str, + /// data: pyarrow.Array, + /// metadata: Option[Dict], + /// ``` + /// + /// ex: + /// + /// ```python + /// node.send_output("string", b"string", {"open_telemetry_context": "7632e76"}) + /// ``` + /// + /// :type output_id: str + /// :type data: pyarrow.Array + /// :type metadata: dict, optional + /// :rtype: None + pub fn send_output( + &mut self, + output_id: String, + data: PyObject, + metadata: Option>, + py: Python, + ) -> eyre::Result<()> { + let parameters = pydict_to_metadata(metadata)?; + + if let Ok(py_bytes) = data.downcast_bound::(py) { + let data = py_bytes.as_bytes(); + self.node + .send_output_bytes(output_id.into(), parameters, data.len(), data) + .wrap_err("failed to send output")?; + } else if let Ok(arrow_array) = arrow::array::ArrayData::from_pyarrow_bound(data.bind(py)) { + self.node.send_output( + output_id.into(), + parameters, + arrow::array::make_array(arrow_array), + )?; + } else { + eyre::bail!("invalid `data` type, must by `PyBytes` or arrow array") + } + + Ok(()) + } + + /// Returns the full dataflow descriptor that this node is part of. + /// + /// This method returns the parsed dataflow YAML file. + /// + /// :rtype: dict + pub fn dataflow_descriptor(&self, py: Python) -> pythonize::Result { + pythonize::pythonize(py, self.node.dataflow_descriptor()) + } + + /// Returns the dataflow id. + /// + /// :rtype: str + pub fn dataflow_id(&self) -> String { + self.node.dataflow_id().to_string() + } + + /// Merge an external event stream with dora main loop. + /// This currently only work with ROS2. 
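+    /// Example (sketch; assumes a topic and subscription were created through the
+    /// dora ROS2 bridge, as in the `Ros2Node` docs):
+    ///
+    /// ```python
+    /// pose_reader = ros2_node.create_subscription(turtle_pose_topic)
+    /// node.merge_external_events(pose_reader)
+    /// ```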
+ /// + /// :type subscription: dora.Ros2Subscription + /// :rtype: None + pub fn merge_external_events( + &mut self, + subscription: &mut Ros2Subscription, + ) -> eyre::Result<()> { + let subscription = subscription.into_stream()?; + let stream = futures::stream::poll_fn(move |cx| { + let s = subscription.as_stream().map(|item| { + match item.context("failed to read ROS2 message") { + Ok((value, _info)) => Python::with_gil(|py| { + value + .to_pyarrow(py) + .context("failed to convert value to pyarrow") + .unwrap_or_else(|err| PyErr::from(err).to_object(py)) + }), + Err(err) => Python::with_gil(|py| PyErr::from(err).to_object(py)), + } + }); + futures::pin_mut!(s); + s.poll_next_unpin(cx) + }); + + // take out the event stream and temporarily replace it with a dummy + let events = std::mem::replace( + &mut self.events, + Events::Merged(Box::new(futures::stream::empty())), + ); + // update self.events with the merged stream + self.events = Events::Merged(events.merge_external_send(Box::pin(stream))); + + Ok(()) + } +} + +enum Events { + Dora(EventStream), + Merged(Box> + Unpin + Send>), +} + +impl Events { + fn recv(&mut self, timeout: Option) -> Option { + match self { + Events::Dora(events) => match timeout { + Some(timeout) => events.recv_timeout(timeout).map(PyEvent::from), + None => events.recv().map(PyEvent::from), + }, + Events::Merged(events) => futures::executor::block_on(events.next()).map(PyEvent::from), + } + } +} + +impl<'a> MergeExternalSend<'a, PyObject> for Events { + type Item = MergedEvent; + + fn merge_external_send( + self, + external_events: impl Stream + Unpin + Send + 'a, + ) -> Box + Unpin + Send + 'a> { + match self { + Events::Dora(events) => events.merge_external_send(external_events), + Events::Merged(events) => { + let merged = events.merge_external_send(external_events); + Box::new(merged.map(|event| match event { + MergedEvent::Dora(e) => MergedEvent::Dora(e), + MergedEvent::External(e) => MergedEvent::External(e.flatten()), + })) + } + } + } +} + +impl Node { + pub fn id(&self) -> String { + self.node.id().to_string() + } +} + +/// Start a runtime for Operators +/// +/// :rtype: None +#[pyfunction] +pub fn start_runtime() -> eyre::Result<()> { + dora_runtime::main().wrap_err("Dora Runtime raised an error.") +} + +#[pymodule] +fn dora(_py: Python, m: Bound<'_, PyModule>) -> PyResult<()> { + dora_ros2_bridge_python::create_dora_ros2_bridge_module(&m)?; + + m.add_function(wrap_pyfunction!(start_runtime, &m)?)?; + m.add_class::()?; + m.add_class::()?; + m.setattr("__version__", env!("CARGO_PKG_VERSION"))?; + m.setattr("__author__", "Dora-rs Authors")?; + + Ok(()) +} diff --git a/apis/python/operator/Cargo.toml b/apis/python/operator/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..07cef1f07b9560c20c94097c5ed4b7fb0b50c858 --- /dev/null +++ b/apis/python/operator/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "dora-operator-api-python" +version.workspace = true +edition = "2021" + +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +dora-node-api = { workspace = true } +pyo3 = { workspace = true, features = ["eyre", "abi3-py37"] } +eyre = "0.6" +serde_yaml = "0.8.23" +flume = "0.10.14" +arrow = { workspace = true, features = ["pyarrow"] } +arrow-schema = { workspace = true } +aligned-vec = "0.5.0" diff --git a/apis/python/operator/src/lib.rs b/apis/python/operator/src/lib.rs new 
file mode 100644 index 0000000000000000000000000000000000000000..bc74279e9816c26ea1af7363ef26202f36fdecda --- /dev/null +++ b/apis/python/operator/src/lib.rs @@ -0,0 +1,259 @@ +use arrow::{array::ArrayRef, pyarrow::ToPyArrow}; +use dora_node_api::{merged::MergedEvent, Event, Metadata, MetadataParameters}; +use eyre::{Context, Result}; +use pyo3::{exceptions::PyLookupError, prelude::*, pybacked::PyBackedStr, types::PyDict}; + +/// Dora Event +#[pyclass] +#[derive(Debug)] +pub struct PyEvent { + event: MergedEvent, + data: Option, +} + +// Dora Event +#[pymethods] +impl PyEvent { + /// + /// :rtype: dora.PyObject + pub fn __getitem__(&self, key: &str, py: Python<'_>) -> PyResult> { + if key == "kind" { + let kind = match &self.event { + MergedEvent::Dora(_) => "dora", + MergedEvent::External(_) => "external", + }; + return Ok(Some(kind.to_object(py))); + } + match &self.event { + MergedEvent::Dora(event) => { + let value = match key { + "type" => Some(Self::ty(event).to_object(py)), + "id" => Self::id(event).map(|v| v.to_object(py)), + "value" => self.value(py)?, + "metadata" => Self::metadata(event, py), + "error" => Self::error(event).map(|v| v.to_object(py)), + other => { + return Err(PyLookupError::new_err(format!( + "event has no property `{other}`" + ))) + } + }; + Ok(value) + } + MergedEvent::External(event) => { + let value = match key { + "value" => event, + _ => todo!(), + }; + + Ok(Some(value.clone())) + } + } + } + + pub fn inner(&mut self) -> Option<&PyObject> { + match &self.event { + MergedEvent::Dora(_) => None, + MergedEvent::External(event) => Some(event), + } + } + + fn __str__(&self) -> PyResult { + Ok(format!("{:#?}", &self.event)) + } +} + +impl PyEvent { + fn ty(event: &Event) -> &str { + match event { + Event::Stop => "STOP", + Event::Input { .. } => "INPUT", + Event::InputClosed { .. } => "INPUT_CLOSED", + Event::Error(_) => "ERROR", + _other => "UNKNOWN", + } + } + + fn id(event: &Event) -> Option<&str> { + match event { + Event::Input { id, .. } => Some(id), + Event::InputClosed { id } => Some(id), + _ => None, + } + } + + /// Returns the payload of an input event as an arrow array (if any). + fn value(&self, py: Python<'_>) -> PyResult> { + match (&self.event, &self.data) { + (MergedEvent::Dora(Event::Input { .. }), Some(data)) => { + // TODO: Does this call leak data? + let array_data = data.to_data().to_pyarrow(py)?; + Ok(Some(array_data)) + } + _ => Ok(None), + } + } + + fn metadata(event: &Event, py: Python<'_>) -> Option { + match event { + Event::Input { metadata, .. } => Some(metadata_to_pydict(metadata, py).to_object(py)), + _ => None, + } + } + + fn error(event: &Event) -> Option<&str> { + match event { + Event::Error(error) => Some(error), + _other => None, + } + } +} + +impl From for PyEvent { + fn from(event: Event) -> Self { + Self::from(MergedEvent::Dora(event)) + } +} + +impl From> for PyEvent { + fn from(mut event: MergedEvent) -> Self { + let data = if let MergedEvent::Dora(Event::Input { data, .. }) = &mut event { + Some(data.clone()) + } else { + None + }; + Self { event, data } + } +} + +pub fn pydict_to_metadata(dict: Option>) -> Result { + let mut default_metadata = MetadataParameters::default(); + if let Some(metadata) = dict { + for (key, value) in metadata.iter() { + match key + .extract::() + .context("Parsing metadata keys")? 
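+                // only the keys matched below are copied into `MetadataParameters`;
+                // any other metadata key is silently ignored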
+ .as_ref() + { + "watermark" => { + default_metadata.watermark = + value.extract().context("parsing watermark failed")?; + } + "deadline" => { + default_metadata.deadline = + value.extract().context("parsing deadline failed")?; + } + "open_telemetry_context" => { + let otel_context: PyBackedStr = value + .extract() + .context("parsing open telemetry context failed")?; + default_metadata.open_telemetry_context = otel_context.to_string(); + } + _ => (), + } + } + } + Ok(default_metadata) +} + +pub fn metadata_to_pydict<'a>(metadata: &'a Metadata, py: Python<'a>) -> pyo3::Bound<'a, PyDict> { + let dict = PyDict::new_bound(py); + dict.set_item( + "open_telemetry_context", + &metadata.parameters.open_telemetry_context, + ) + .wrap_err("could not make metadata a python dictionary item") + .unwrap(); + dict +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use aligned_vec::{AVec, ConstAlign}; + use arrow::{ + array::{ + ArrayData, ArrayRef, BooleanArray, Float64Array, Int32Array, Int64Array, Int8Array, + ListArray, StructArray, + }, + buffer::Buffer, + }; + + use arrow_schema::{DataType, Field}; + use dora_node_api::{ + arrow_utils::{copy_array_into_sample, required_data_size}, + RawData, + }; + use eyre::{Context, Result}; + + fn assert_roundtrip(arrow_array: &ArrayData) -> Result<()> { + let size = required_data_size(arrow_array); + let mut sample: AVec> = AVec::__from_elem(128, 0, size); + + let info = copy_array_into_sample(&mut sample, arrow_array); + + let serialized_deserialized_arrow_array = RawData::Vec(sample) + .into_arrow_array(&info) + .context("Could not create arrow array")?; + assert_eq!(arrow_array, &serialized_deserialized_arrow_array); + + Ok(()) + } + + #[test] + fn serialize_deserialize_arrow() -> Result<()> { + // Int8 + let arrow_array = Int8Array::from(vec![1, -2, 3, 4]).into(); + assert_roundtrip(&arrow_array).context("Int8Array roundtrip failed")?; + + // Int64 + let arrow_array = Int64Array::from(vec![1, -2, 3, 4]).into(); + assert_roundtrip(&arrow_array).context("Int64Array roundtrip failed")?; + + // Float64 + let arrow_array = Float64Array::from(vec![1., -2., 3., 4.]).into(); + assert_roundtrip(&arrow_array).context("Float64Array roundtrip failed")?; + + // Struct + let boolean = Arc::new(BooleanArray::from(vec![false, false, true, true])); + let int = Arc::new(Int32Array::from(vec![42, 28, 19, 31])); + + let struct_array = StructArray::from(vec![ + ( + Arc::new(Field::new("b", DataType::Boolean, false)), + boolean as ArrayRef, + ), + ( + Arc::new(Field::new("c", DataType::Int32, false)), + int as ArrayRef, + ), + ]) + .into(); + assert_roundtrip(&struct_array).context("StructArray roundtrip failed")?; + + // List + let value_data = ArrayData::builder(DataType::Int32) + .len(8) + .add_buffer(Buffer::from_slice_ref([0, 1, 2, 3, 4, 5, 6, 7])) + .build() + .unwrap(); + + // Construct a buffer for value offsets, for the nested array: + // [[0, 1, 2], [3, 4, 5], [6, 7]] + let value_offsets = Buffer::from_slice_ref([0, 3, 6, 8]); + + // Construct a list array from the above two + let list_data_type = DataType::List(Arc::new(Field::new("item", DataType::Int32, false))); + let list_data = ArrayData::builder(list_data_type) + .len(3) + .add_buffer(value_offsets) + .add_child_data(value_data) + .build() + .unwrap(); + let list_array = ListArray::from(list_data).into(); + assert_roundtrip(&list_array).context("ListArray roundtrip failed")?; + + Ok(()) + } +} diff --git a/apis/rust/node/Cargo.toml b/apis/rust/node/Cargo.toml new file mode 100644 index 
0000000000000000000000000000000000000000..015ad9adc9f5d7da19eeae8db1b17e9a3f3e689e --- /dev/null +++ b/apis/rust/node/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "dora-node-api" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +[features] +default = ["tracing"] +tracing = ["dep:dora-tracing"] + +[dependencies] +dora-core = { workspace = true } +shared-memory-server = { workspace = true } +eyre = "0.6.7" +serde_yaml = "0.8.23" +tracing = "0.1.33" +flume = "0.10.14" +bincode = "1.3.3" +shared_memory_extended = "0.13.0" +dora-tracing = { workspace = true, optional = true } +arrow = { workspace = true } +futures = "0.3.28" +futures-concurrency = "7.3.0" +futures-timer = "3.0.2" +dora-arrow-convert = { workspace = true } +aligned-vec = "0.5.0" + +[dev-dependencies] +tokio = { version = "1.24.2", features = ["rt"] } diff --git a/apis/rust/node/src/daemon_connection/mod.rs b/apis/rust/node/src/daemon_connection/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..7d778fb45da154a3f22bec9e734b1fb6b35b7993 --- /dev/null +++ b/apis/rust/node/src/daemon_connection/mod.rs @@ -0,0 +1,74 @@ +use dora_core::{ + config::NodeId, + daemon_messages::{DaemonReply, DaemonRequest, DataflowId, Timestamped}, + message::uhlc::Timestamp, +}; +use eyre::{bail, eyre, Context}; +use shared_memory_server::{ShmemClient, ShmemConf}; +use std::{ + net::{SocketAddr, TcpStream}, + time::Duration, +}; + +mod tcp; + +pub enum DaemonChannel { + Shmem(ShmemClient<Timestamped<DaemonRequest>, DaemonReply>), + Tcp(TcpStream), +} + +impl DaemonChannel { + #[tracing::instrument(level = "trace")] + pub fn new_tcp(socket_addr: SocketAddr) -> eyre::Result<Self> { + let stream = TcpStream::connect(socket_addr).wrap_err("failed to open TCP connection")?; + stream.set_nodelay(true).context("failed to set nodelay")?; + Ok(DaemonChannel::Tcp(stream)) + } + + #[tracing::instrument(level = "trace")] + pub unsafe fn new_shmem(daemon_control_region_id: &str) -> eyre::Result<Self> { + let daemon_events_region = ShmemConf::new() + .os_id(daemon_control_region_id) + .open() + .wrap_err("failed to connect to dora-daemon")?; + let channel = DaemonChannel::Shmem( + unsafe { ShmemClient::new(daemon_events_region, Some(Duration::from_secs(5))) } + .wrap_err("failed to create ShmemChannel")?, + ); + Ok(channel) + } + + pub fn register( + &mut self, + dataflow_id: DataflowId, + node_id: NodeId, + timestamp: Timestamp, + ) -> eyre::Result<()> { + let msg = Timestamped { + inner: DaemonRequest::Register { + dataflow_id, + node_id, + dora_version: env!("CARGO_PKG_VERSION").to_owned(), + }, + timestamp, + }; + let reply = self + .request(&msg) + .wrap_err("failed to send register request to dora-daemon")?; + + match reply { + dora_core::daemon_messages::DaemonReply::Result(result) => result + .map_err(|e| eyre!(e)) + .wrap_err("failed to register node with dora-daemon")?, + other => bail!("unexpected register reply: {other:?}"), + } + Ok(()) + } + + pub fn request(&mut self, request: &Timestamped<DaemonRequest>) -> eyre::Result<DaemonReply> { + match self { + DaemonChannel::Shmem(client) => client.request(request), + DaemonChannel::Tcp(stream) => tcp::request(stream, request), + } + } +} diff --git a/apis/rust/node/src/daemon_connection/tcp.rs b/apis/rust/node/src/daemon_connection/tcp.rs new file mode 100644 index 0000000000000000000000000000000000000000..62794d0a4d7e6270bb0fa05b21e70bf6143bc0e9 --- /dev/null +++ b/apis/rust/node/src/daemon_connection/tcp.rs @@ -0,0 +1,68 @@ +use
dora_core::daemon_messages::{DaemonReply, DaemonRequest, Timestamped}; +use eyre::{eyre, Context}; +use std::{ + io::{Read, Write}, + net::TcpStream, +}; + +pub fn request( + connection: &mut TcpStream, + request: &Timestamped, +) -> eyre::Result { + send_message(connection, request)?; + if request.inner.expects_tcp_reply() { + receive_reply(connection) + .and_then(|reply| reply.ok_or_else(|| eyre!("server disconnected unexpectedly"))) + } else { + Ok(DaemonReply::Empty) + } +} + +fn send_message( + connection: &mut TcpStream, + message: &Timestamped, +) -> eyre::Result<()> { + let serialized = bincode::serialize(&message).wrap_err("failed to serialize DaemonRequest")?; + tcp_send(connection, &serialized).wrap_err("failed to send DaemonRequest")?; + Ok(()) +} + +fn receive_reply(connection: &mut TcpStream) -> eyre::Result> { + let raw = match tcp_receive(connection) { + Ok(raw) => raw, + Err(err) => match err.kind() { + std::io::ErrorKind::UnexpectedEof | std::io::ErrorKind::ConnectionAborted => { + return Ok(None) + } + other => { + return Err(err).with_context(|| { + format!( + "unexpected I/O error (kind {other:?}) while trying to receive DaemonReply" + ) + }) + } + }, + }; + bincode::deserialize(&raw) + .wrap_err("failed to deserialize DaemonReply") + .map(Some) +} + +fn tcp_send(connection: &mut (impl Write + Unpin), message: &[u8]) -> std::io::Result<()> { + let len_raw = (message.len() as u64).to_le_bytes(); + connection.write_all(&len_raw)?; + connection.write_all(message)?; + connection.flush()?; + Ok(()) +} + +fn tcp_receive(connection: &mut (impl Read + Unpin)) -> std::io::Result> { + let reply_len = { + let mut raw = [0; 8]; + connection.read_exact(&mut raw)?; + u64::from_le_bytes(raw) as usize + }; + let mut reply = vec![0; reply_len]; + connection.read_exact(&mut reply)?; + Ok(reply) +} diff --git a/apis/rust/node/src/event_stream/event.rs b/apis/rust/node/src/event_stream/event.rs new file mode 100644 index 0000000000000000000000000000000000000000..75b3c595b2e8faec19392aa1fd5bb6f694a226ef --- /dev/null +++ b/apis/rust/node/src/event_stream/event.rs @@ -0,0 +1,128 @@ +use std::{ptr::NonNull, sync::Arc}; + +use aligned_vec::{AVec, ConstAlign}; +use dora_arrow_convert::{ArrowData, IntoArrow}; +use dora_core::{ + config::{DataId, OperatorId}, + message::{ArrowTypeInfo, BufferOffset, Metadata}, +}; +use eyre::{Context, Result}; +use shared_memory_extended::{Shmem, ShmemConf}; + +#[derive(Debug)] +#[non_exhaustive] +pub enum Event { + Stop, + Reload { + operator_id: Option, + }, + Input { + id: DataId, + metadata: Metadata, + data: ArrowData, + }, + InputClosed { + id: DataId, + }, + Error(String), +} + +pub enum RawData { + Empty, + Vec(AVec>), + SharedMemory(SharedMemoryData), +} + +impl RawData { + pub fn into_arrow_array(self, type_info: &ArrowTypeInfo) -> Result { + let raw_buffer = match self { + RawData::Empty => return Ok(().into_arrow().into()), + RawData::Vec(data) => { + let ptr = NonNull::new(data.as_ptr() as *mut _).unwrap(); + let len = data.len(); + + unsafe { arrow::buffer::Buffer::from_custom_allocation(ptr, len, Arc::new(data)) } + } + RawData::SharedMemory(data) => { + let ptr = NonNull::new(data.data.as_ptr() as *mut _).unwrap(); + let len = data.data.len(); + + unsafe { arrow::buffer::Buffer::from_custom_allocation(ptr, len, Arc::new(data)) } + } + }; + + buffer_into_arrow_array(&raw_buffer, type_info) + } +} + +pub struct SharedMemoryData { + pub data: MappedInputData, + pub _drop: flume::Sender<()>, +} + +fn buffer_into_arrow_array( + raw_buffer: 
&arrow::buffer::Buffer, + type_info: &ArrowTypeInfo, +) -> eyre::Result { + if raw_buffer.is_empty() { + return Ok(arrow::array::ArrayData::new_empty(&type_info.data_type)); + } + + let mut buffers = Vec::new(); + for BufferOffset { offset, len } in &type_info.buffer_offsets { + buffers.push(raw_buffer.slice_with_length(*offset, *len)); + } + + let mut child_data = Vec::new(); + for child_type_info in &type_info.child_data { + child_data.push(buffer_into_arrow_array(raw_buffer, child_type_info)?) + } + + arrow::array::ArrayData::try_new( + type_info.data_type.clone(), + type_info.len, + type_info + .validity + .clone() + .map(arrow::buffer::Buffer::from_vec), + type_info.offset, + buffers, + child_data, + ) + .context("Error creating Arrow array") +} + +impl std::fmt::Debug for RawData { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Data").finish_non_exhaustive() + } +} + +pub struct MappedInputData { + memory: Box, + len: usize, +} + +impl MappedInputData { + pub(crate) unsafe fn map(shared_memory_id: &str, len: usize) -> eyre::Result { + let memory = Box::new( + ShmemConf::new() + .os_id(shared_memory_id) + .writable(false) + .open() + .wrap_err("failed to map shared memory input")?, + ); + Ok(MappedInputData { memory, len }) + } +} + +impl std::ops::Deref for MappedInputData { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + unsafe { &self.memory.as_slice()[..self.len] } + } +} + +unsafe impl Send for MappedInputData {} +unsafe impl Sync for MappedInputData {} diff --git a/apis/rust/node/src/event_stream/merged.rs b/apis/rust/node/src/event_stream/merged.rs new file mode 100644 index 0000000000000000000000000000000000000000..b6c0129c8fe387bae1f16b7738b59bd296cb831d --- /dev/null +++ b/apis/rust/node/src/event_stream/merged.rs @@ -0,0 +1,115 @@ +use futures::{Stream, StreamExt}; +use futures_concurrency::stream::Merge; + +#[derive(Debug)] +pub enum MergedEvent { + Dora(super::Event), + External(E), +} + +pub enum Either { + First(A), + Second(B), +} + +impl Either { + pub fn flatten(self) -> A { + match self { + Either::First(a) => a, + Either::Second(a) => a, + } + } +} + +// TODO: use impl trait return type once stable +pub trait MergeExternal<'a, E> { + type Item; + + fn merge_external( + self, + external_events: impl Stream + Unpin + 'a, + ) -> Box + Unpin + 'a>; +} + +pub trait MergeExternalSend<'a, E> { + type Item; + + fn merge_external_send( + self, + external_events: impl Stream + Unpin + Send + 'a, + ) -> Box + Unpin + Send + 'a>; +} + +impl<'a, E> MergeExternal<'a, E> for super::EventStream +where + E: 'static, +{ + type Item = MergedEvent; + + fn merge_external( + self, + external_events: impl Stream + Unpin + 'a, + ) -> Box + Unpin + 'a> { + let dora = self.map(MergedEvent::Dora); + let external = external_events.map(MergedEvent::External); + Box::new((dora, external).merge()) + } +} + +impl<'a, E> MergeExternalSend<'a, E> for super::EventStream +where + E: 'static, +{ + type Item = MergedEvent; + + fn merge_external_send( + self, + external_events: impl Stream + Unpin + Send + 'a, + ) -> Box + Unpin + Send + 'a> { + let dora = self.map(MergedEvent::Dora); + let external = external_events.map(MergedEvent::External); + Box::new((dora, external).merge()) + } +} + +impl<'a, E, F, S> MergeExternal<'a, F> for S +where + S: Stream> + Unpin + 'a, + E: 'a, + F: 'a, +{ + type Item = MergedEvent>; + + fn merge_external( + self, + external_events: impl Stream + Unpin + 'a, + ) -> Box + Unpin + 'a> { + let first = self.map(|e| 
match e { + MergedEvent::Dora(d) => MergedEvent::Dora(d), + MergedEvent::External(e) => MergedEvent::External(Either::First(e)), + }); + let second = external_events.map(|e| MergedEvent::External(Either::Second(e))); + Box::new((first, second).merge()) + } +} + +impl<'a, E, F, S> MergeExternalSend<'a, F> for S +where + S: Stream> + Unpin + Send + 'a, + E: 'a, + F: 'a, +{ + type Item = MergedEvent>; + + fn merge_external_send( + self, + external_events: impl Stream + Unpin + Send + 'a, + ) -> Box + Unpin + Send + 'a> { + let first = self.map(|e| match e { + MergedEvent::Dora(d) => MergedEvent::Dora(d), + MergedEvent::External(e) => MergedEvent::External(Either::First(e)), + }); + let second = external_events.map(|e| MergedEvent::External(Either::Second(e))); + Box::new((first, second).merge()) + } +} diff --git a/apis/rust/node/src/event_stream/mod.rs b/apis/rust/node/src/event_stream/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..9575a8d7b798c466bcde243734aa76a50fa81c97 --- /dev/null +++ b/apis/rust/node/src/event_stream/mod.rs @@ -0,0 +1,226 @@ +use std::{sync::Arc, time::Duration}; + +pub use event::{Event, MappedInputData, RawData}; +use futures::{ + future::{select, Either}, + Stream, StreamExt, +}; +use futures_timer::Delay; + +use self::{ + event::SharedMemoryData, + thread::{EventItem, EventStreamThreadHandle}, +}; +use crate::daemon_connection::DaemonChannel; +use dora_core::{ + config::NodeId, + daemon_messages::{ + self, DaemonCommunication, DaemonRequest, DataflowId, NodeEvent, Timestamped, + }, + message::uhlc, +}; +use eyre::{eyre, Context}; + +mod event; +pub mod merged; +mod thread; + +pub struct EventStream { + node_id: NodeId, + receiver: flume::r#async::RecvStream<'static, EventItem>, + _thread_handle: EventStreamThreadHandle, + close_channel: DaemonChannel, + clock: Arc, +} + +impl EventStream { + #[tracing::instrument(level = "trace", skip(clock))] + pub(crate) fn init( + dataflow_id: DataflowId, + node_id: &NodeId, + daemon_communication: &DaemonCommunication, + clock: Arc, + ) -> eyre::Result { + let channel = match daemon_communication { + DaemonCommunication::Shmem { + daemon_events_region_id, + .. + } => unsafe { DaemonChannel::new_shmem(daemon_events_region_id) }.wrap_err_with( + || format!("failed to create shmem event stream for node `{node_id}`"), + )?, + DaemonCommunication::Tcp { socket_addr } => DaemonChannel::new_tcp(*socket_addr) + .wrap_err_with(|| format!("failed to connect event stream for node `{node_id}`"))?, + }; + + let close_channel = match daemon_communication { + DaemonCommunication::Shmem { + daemon_events_close_region_id, + .. 
+ } => unsafe { DaemonChannel::new_shmem(daemon_events_close_region_id) }.wrap_err_with( + || format!("failed to create shmem event close channel for node `{node_id}`"), + )?, + DaemonCommunication::Tcp { socket_addr } => DaemonChannel::new_tcp(*socket_addr) + .wrap_err_with(|| { + format!("failed to connect event close channel for node `{node_id}`") + })?, + }; + + Self::init_on_channel(dataflow_id, node_id, channel, close_channel, clock) + } + + pub(crate) fn init_on_channel( + dataflow_id: DataflowId, + node_id: &NodeId, + mut channel: DaemonChannel, + mut close_channel: DaemonChannel, + clock: Arc, + ) -> eyre::Result { + channel.register(dataflow_id, node_id.clone(), clock.new_timestamp())?; + let reply = channel + .request(&Timestamped { + inner: DaemonRequest::Subscribe, + timestamp: clock.new_timestamp(), + }) + .map_err(|e| eyre!(e)) + .wrap_err("failed to create subscription with dora-daemon")?; + + match reply { + daemon_messages::DaemonReply::Result(Ok(())) => {} + daemon_messages::DaemonReply::Result(Err(err)) => { + eyre::bail!("subscribe failed: {err}") + } + other => eyre::bail!("unexpected subscribe reply: {other:?}"), + } + + close_channel.register(dataflow_id, node_id.clone(), clock.new_timestamp())?; + + let (tx, rx) = flume::bounded(0); + let thread_handle = thread::init(node_id.clone(), tx, channel, clock.clone())?; + + Ok(EventStream { + node_id: node_id.clone(), + receiver: rx.into_stream(), + _thread_handle: thread_handle, + close_channel, + clock, + }) + } + + /// wait for the next event on the events stream. + pub fn recv(&mut self) -> Option { + futures::executor::block_on(self.recv_async()) + } + + /// wait for the next event on the events stream until timeout + pub fn recv_timeout(&mut self, dur: Duration) -> Option { + futures::executor::block_on(self.recv_async_timeout(dur)) + } + + pub async fn recv_async(&mut self) -> Option { + self.receiver.next().await.map(Self::convert_event_item) + } + + pub async fn recv_async_timeout(&mut self, dur: Duration) -> Option { + let next_event = match select(Delay::new(dur), self.receiver.next()).await { + Either::Left((_elapsed, _)) => { + Some(EventItem::TimeoutError(eyre!("Receiver timed out"))) + } + Either::Right((event, _)) => event, + }; + next_event.map(Self::convert_event_item) + } + + fn convert_event_item(item: EventItem) -> Event { + match item { + EventItem::NodeEvent { event, ack_channel } => match event { + NodeEvent::Stop => Event::Stop, + NodeEvent::Reload { operator_id } => Event::Reload { operator_id }, + NodeEvent::InputClosed { id } => Event::InputClosed { id }, + NodeEvent::Input { id, metadata, data } => { + let data = match data { + None => Ok(None), + Some(daemon_messages::DataMessage::Vec(v)) => Ok(Some(RawData::Vec(v))), + Some(daemon_messages::DataMessage::SharedMemory { + shared_memory_id, + len, + drop_token: _, // handled in `event_stream_loop` + }) => unsafe { + MappedInputData::map(&shared_memory_id, len).map(|data| { + Some(RawData::SharedMemory(SharedMemoryData { + data, + _drop: ack_channel, + })) + }) + }, + }; + let data = data.and_then(|data| { + let raw_data = data.unwrap_or(RawData::Empty); + raw_data + .into_arrow_array(&metadata.type_info) + .map(arrow::array::make_array) + }); + match data { + Ok(data) => Event::Input { + id, + metadata, + data: data.into(), + }, + Err(err) => Event::Error(format!("{err:?}")), + } + } + NodeEvent::AllInputsClosed => { + let err = eyre!( + "received `AllInputsClosed` event, which should be handled by background task" + ); + 
tracing::error!("{err:?}"); + Event::Error(err.wrap_err("internal error").to_string()) + } + }, + + EventItem::FatalError(err) => { + Event::Error(format!("fatal event stream error: {err:?}")) + } + EventItem::TimeoutError(err) => { + Event::Error(format!("Timeout event stream error: {err:?}")) + } + } + } +} + +impl Stream for EventStream { + type Item = Event; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.receiver + .poll_next_unpin(cx) + .map(|item| item.map(Self::convert_event_item)) + } +} + +impl Drop for EventStream { + #[tracing::instrument(skip(self), fields(%self.node_id))] + fn drop(&mut self) { + let request = Timestamped { + inner: DaemonRequest::EventStreamDropped, + timestamp: self.clock.new_timestamp(), + }; + let result = self + .close_channel + .request(&request) + .map_err(|e| eyre!(e)) + .wrap_err("failed to signal event stream closure to dora-daemon") + .and_then(|r| match r { + daemon_messages::DaemonReply::Result(Ok(())) => Ok(()), + daemon_messages::DaemonReply::Result(Err(err)) => { + Err(eyre!("EventStreamClosed failed: {err}")) + } + other => Err(eyre!("unexpected EventStreamClosed reply: {other:?}")), + }); + if let Err(err) = result { + tracing::warn!("{err:?}") + } + } +} diff --git a/apis/rust/node/src/event_stream/thread.rs b/apis/rust/node/src/event_stream/thread.rs new file mode 100644 index 0000000000000000000000000000000000000000..bee8cc22121154f3b8398ab2e9f63c8a4d859cd7 --- /dev/null +++ b/apis/rust/node/src/event_stream/thread.rs @@ -0,0 +1,269 @@ +use dora_core::{ + config::NodeId, + daemon_messages::{DaemonReply, DaemonRequest, DropToken, NodeEvent, Timestamped}, + message::uhlc::{self, Timestamp}, +}; +use eyre::{eyre, Context}; +use flume::RecvTimeoutError; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; + +use crate::daemon_connection::DaemonChannel; + +pub fn init( + node_id: NodeId, + tx: flume::Sender, + channel: DaemonChannel, + clock: Arc, +) -> eyre::Result { + let node_id_cloned = node_id.clone(); + let join_handle = std::thread::spawn(|| event_stream_loop(node_id_cloned, tx, channel, clock)); + Ok(EventStreamThreadHandle::new(node_id, join_handle)) +} + +#[derive(Debug)] +pub enum EventItem { + NodeEvent { + event: NodeEvent, + ack_channel: flume::Sender<()>, + }, + FatalError(eyre::Report), + TimeoutError(eyre::Report), +} + +pub struct EventStreamThreadHandle { + node_id: NodeId, + handle: flume::Receiver>, +} + +impl EventStreamThreadHandle { + fn new(node_id: NodeId, join_handle: std::thread::JoinHandle<()>) -> Self { + let (tx, rx) = flume::bounded(1); + std::thread::spawn(move || { + let _ = tx.send(join_handle.join()); + }); + Self { + node_id, + handle: rx, + } + } +} + +impl Drop for EventStreamThreadHandle { + #[tracing::instrument(skip(self), fields(node_id = %self.node_id))] + fn drop(&mut self) { + if self.handle.is_empty() { + tracing::trace!("waiting for event stream thread"); + } + match self.handle.recv_timeout(Duration::from_secs(20)) { + Ok(Ok(())) => { + tracing::trace!("event stream thread finished"); + } + Ok(Err(_)) => { + tracing::error!("event stream thread panicked"); + } + Err(RecvTimeoutError::Timeout) => { + tracing::warn!("timeout while waiting for event stream thread"); + } + Err(RecvTimeoutError::Disconnected) => { + tracing::warn!("event stream thread result channel closed unexpectedly"); + } + } + } +} + +#[tracing::instrument(skip(tx, channel, clock))] +fn event_stream_loop( + node_id: NodeId, + tx: flume::Sender, + 
mut channel: DaemonChannel, + clock: Arc, +) { + let mut tx = Some(tx); + let mut pending_drop_tokens: Vec<(DropToken, flume::Receiver<()>, Instant, u64)> = Vec::new(); + let mut drop_tokens = Vec::new(); + + let result = 'outer: loop { + if let Err(err) = handle_pending_drop_tokens(&mut pending_drop_tokens, &mut drop_tokens) { + break 'outer Err(err); + } + + let daemon_request = Timestamped { + inner: DaemonRequest::NextEvent { + drop_tokens: std::mem::take(&mut drop_tokens), + }, + timestamp: clock.new_timestamp(), + }; + let events = match channel.request(&daemon_request) { + Ok(DaemonReply::NextEvents(events)) => { + if events.is_empty() { + tracing::trace!("event stream closed for node `{node_id}`"); + break Ok(()); + } else { + events + } + } + Ok(other) => { + let err = eyre!("unexpected control reply: {other:?}"); + tracing::warn!("{err:?}"); + continue; + } + Err(err) => { + let err = eyre!(err).wrap_err("failed to receive incoming event"); + tracing::warn!("{err:?}"); + continue; + } + }; + for Timestamped { inner, timestamp } in events { + if let Err(err) = clock.update_with_timestamp(×tamp) { + tracing::warn!("failed to update HLC: {err}"); + } + let drop_token = match &inner { + NodeEvent::Input { + data: Some(data), .. + } => data.drop_token(), + NodeEvent::AllInputsClosed => { + // close the event stream + tx = None; + // skip this internal event + continue; + } + _ => None, + }; + + if let Some(tx) = tx.as_ref() { + let (drop_tx, drop_rx) = flume::bounded(0); + match tx.send(EventItem::NodeEvent { + event: inner, + ack_channel: drop_tx, + }) { + Ok(()) => {} + Err(send_error) => { + let event = send_error.into_inner(); + tracing::trace!( + "event channel was closed already, could not forward `{event:?}`" + ); + + break 'outer Ok(()); + } + } + + if let Some(token) = drop_token { + pending_drop_tokens.push((token, drop_rx, Instant::now(), 1)); + } + } else { + tracing::warn!("dropping event because event `tx` was already closed: `{inner:?}`"); + } + } + }; + if let Err(err) = result { + if let Some(tx) = tx.as_ref() { + if let Err(flume::SendError(item)) = tx.send(EventItem::FatalError(err)) { + let err = match item { + EventItem::FatalError(err) => err, + _ => unreachable!(), + }; + tracing::error!("failed to report fatal EventStream error: {err:?}"); + } + } else { + tracing::error!("received error event after `tx` was closed: {err:?}"); + } + } + + if let Err(err) = report_remaining_drop_tokens( + channel, + drop_tokens, + pending_drop_tokens, + clock.new_timestamp(), + ) + .context("failed to report remaining drop tokens") + { + tracing::warn!("{err:?}"); + } +} + +fn handle_pending_drop_tokens( + pending_drop_tokens: &mut Vec<(DropToken, flume::Receiver<()>, Instant, u64)>, + drop_tokens: &mut Vec, +) -> eyre::Result<()> { + let mut still_pending = Vec::new(); + for (token, rx, since, warn) in pending_drop_tokens.drain(..) 
{ + match rx.try_recv() { + Ok(()) => return Err(eyre!("Node API should not send anything on ACK channel")), + Err(flume::TryRecvError::Disconnected) => { + // the event was dropped -> add the drop token to the list + drop_tokens.push(token); + } + Err(flume::TryRecvError::Empty) => { + let duration = Duration::from_secs(30 * warn); + if since.elapsed() > duration { + tracing::warn!("timeout: token {token:?} was not dropped after {duration:?}"); + } + still_pending.push((token, rx, since, warn + 1)); + } + } + } + *pending_drop_tokens = still_pending; + Ok(()) +} + +fn report_remaining_drop_tokens( + mut channel: DaemonChannel, + mut drop_tokens: Vec, + mut pending_drop_tokens: Vec<(DropToken, flume::Receiver<()>, Instant, u64)>, + timestamp: Timestamp, +) -> eyre::Result<()> { + while !(pending_drop_tokens.is_empty() && drop_tokens.is_empty()) { + report_drop_tokens(&mut drop_tokens, &mut channel, timestamp)?; + + let mut still_pending = Vec::new(); + for (token, rx, since, _) in pending_drop_tokens.drain(..) { + match rx.recv_timeout(Duration::from_millis(100)) { + Ok(()) => return Err(eyre!("Node API should not send anything on ACK channel")), + Err(flume::RecvTimeoutError::Disconnected) => { + // the event was dropped -> add the drop token to the list + drop_tokens.push(token); + } + Err(flume::RecvTimeoutError::Timeout) => { + let duration = Duration::from_secs(30); + if since.elapsed() > duration { + tracing::warn!( + "timeout: node finished, but token {token:?} was still not \ + dropped after {duration:?} -> ignoring it" + ); + } else { + still_pending.push((token, rx, since, 0)); + } + } + } + } + pending_drop_tokens = still_pending; + if !pending_drop_tokens.is_empty() { + tracing::trace!("waiting for drop for {} events", pending_drop_tokens.len()); + } + } + + Ok(()) +} + +fn report_drop_tokens( + drop_tokens: &mut Vec, + channel: &mut DaemonChannel, + timestamp: Timestamp, +) -> Result<(), eyre::ErrReport> { + if drop_tokens.is_empty() { + return Ok(()); + } + let daemon_request = Timestamped { + inner: DaemonRequest::ReportDropTokens { + drop_tokens: std::mem::take(drop_tokens), + }, + timestamp, + }; + match channel.request(&daemon_request)? { + dora_core::daemon_messages::DaemonReply::Empty => Ok(()), + other => Err(eyre!("unexpected ReportDropTokens reply: {other:?}")), + } +} diff --git a/apis/rust/node/src/lib.rs b/apis/rust/node/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..d2ce63dd23414d3983e121d15e364eca7ddb9d56 --- /dev/null +++ b/apis/rust/node/src/lib.rs @@ -0,0 +1,26 @@ +//! The custom node API allow you to integrate `dora` into your application. +//! It allows you to retrieve input and send output in any fashion you want. +//! +//! Try it out with: +//! +//! ```bash +//! dora new node --kind node +//! ``` +//! +//! You can also generate a dora rust project with +//! +//! ```bash +//! dora new project_xyz --kind dataflow +//! ``` +//! 
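For orientation, a minimal custom node built on the API re-exported below might look like the following sketch. The `Event::Input`/`Event::Stop` variants and the blocking `recv()` helper are assumed from the surrounding event-stream module, and the `counter` output id is illustrative; it has to be declared for the node in the dataflow YAML.

```rust
use dora_node_api::dora_core::config::DataId;
use dora_node_api::{arrow::array::UInt8Array, DoraNode, Event, MetadataParameters};

fn main() {
    let (mut node, mut events) = DoraNode::init_from_env().expect("Could not init node.");
    let output = DataId::from("counter".to_owned());
    let mut count = 0u8;

    // receive inputs until the daemon closes the stream or a stop event arrives
    while let Some(event) = events.recv() {
        match event {
            Event::Input { id, .. } => {
                count = count.wrapping_add(1);
                println!("received input `{id}`, sending count {count}");
                // small payloads like this are sent inline; larger ones use shared memory
                node.send_output(
                    output.clone(),
                    MetadataParameters::default(),
                    UInt8Array::from(vec![count]),
                )
                .expect("Could not send output");
            }
            Event::Stop => break,
            _ => {}
        }
    }
}
```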
+pub use arrow; +pub use dora_arrow_convert::*; +pub use dora_core; +pub use dora_core::message::{uhlc, Metadata, MetadataParameters}; +pub use event_stream::{merged, Event, EventStream, MappedInputData, RawData}; +pub use flume::Receiver; +pub use node::{arrow_utils, DataSample, DoraNode, ZERO_COPY_THRESHOLD}; + +mod daemon_connection; +mod event_stream; +mod node; diff --git a/apis/rust/node/src/node/arrow_utils.rs b/apis/rust/node/src/node/arrow_utils.rs new file mode 100644 index 0000000000000000000000000000000000000000..8deb3ca2f2694c0bc8afd1008186013f17eda186 --- /dev/null +++ b/apis/rust/node/src/node/arrow_utils.rs @@ -0,0 +1,71 @@ +use arrow::array::{ArrayData, BufferSpec}; +use dora_core::message::{ArrowTypeInfo, BufferOffset}; + +pub fn required_data_size(array: &ArrayData) -> usize { + let mut next_offset = 0; + required_data_size_inner(array, &mut next_offset); + next_offset +} +fn required_data_size_inner(array: &ArrayData, next_offset: &mut usize) { + let layout = arrow::array::layout(array.data_type()); + for (buffer, spec) in array.buffers().iter().zip(&layout.buffers) { + // consider alignment padding + if let BufferSpec::FixedWidth { alignment, .. } = spec { + *next_offset = (*next_offset + alignment - 1) / alignment * alignment; + } + *next_offset += buffer.len(); + } + for child in array.child_data() { + required_data_size_inner(child, next_offset); + } +} + +pub fn copy_array_into_sample(target_buffer: &mut [u8], arrow_array: &ArrayData) -> ArrowTypeInfo { + let mut next_offset = 0; + copy_array_into_sample_inner(target_buffer, &mut next_offset, arrow_array) +} + +fn copy_array_into_sample_inner( + target_buffer: &mut [u8], + next_offset: &mut usize, + arrow_array: &ArrayData, +) -> ArrowTypeInfo { + let mut buffer_offsets = Vec::new(); + let layout = arrow::array::layout(arrow_array.data_type()); + for (buffer, spec) in arrow_array.buffers().iter().zip(&layout.buffers) { + let len = buffer.len(); + assert!( + target_buffer[*next_offset..].len() >= len, + "target buffer too small (total_len: {}, offset: {}, required_len: {len})", + target_buffer.len(), + *next_offset, + ); + // add alignment padding + if let BufferSpec::FixedWidth { alignment, .. 
} = spec { + *next_offset = (*next_offset + alignment - 1) / alignment * alignment; + } + + target_buffer[*next_offset..][..len].copy_from_slice(buffer.as_slice()); + buffer_offsets.push(BufferOffset { + offset: *next_offset, + len, + }); + *next_offset += len; + } + + let mut child_data = Vec::new(); + for child in arrow_array.child_data() { + let child_type_info = copy_array_into_sample_inner(target_buffer, next_offset, child); + child_data.push(child_type_info); + } + + ArrowTypeInfo { + data_type: arrow_array.data_type().clone(), + len: arrow_array.len(), + null_count: arrow_array.null_count(), + validity: arrow_array.nulls().map(|b| b.validity().to_owned()), + offset: arrow_array.offset(), + buffer_offsets, + child_data, + } +} diff --git a/apis/rust/node/src/node/control_channel.rs b/apis/rust/node/src/node/control_channel.rs new file mode 100644 index 0000000000000000000000000000000000000000..69329578904b518316cfd78fe99f311935b82dd1 --- /dev/null +++ b/apis/rust/node/src/node/control_channel.rs @@ -0,0 +1,106 @@ +use std::sync::Arc; + +use crate::daemon_connection::DaemonChannel; +use dora_core::{ + config::{DataId, NodeId}, + daemon_messages::{DaemonCommunication, DaemonRequest, DataMessage, DataflowId, Timestamped}, + message::{uhlc::HLC, Metadata}, +}; +use eyre::{bail, eyre, Context}; + +pub(crate) struct ControlChannel { + channel: DaemonChannel, + clock: Arc, +} + +impl ControlChannel { + #[tracing::instrument(level = "trace", skip(clock))] + pub(crate) fn init( + dataflow_id: DataflowId, + node_id: &NodeId, + daemon_communication: &DaemonCommunication, + clock: Arc, + ) -> eyre::Result { + let channel = match daemon_communication { + DaemonCommunication::Shmem { + daemon_control_region_id, + .. + } => unsafe { DaemonChannel::new_shmem(daemon_control_region_id) } + .wrap_err("failed to create shmem control channel")?, + DaemonCommunication::Tcp { socket_addr } => DaemonChannel::new_tcp(*socket_addr) + .wrap_err("failed to connect control channel")?, + }; + + Self::init_on_channel(dataflow_id, node_id, channel, clock) + } + + #[tracing::instrument(skip(channel, clock), level = "trace")] + pub fn init_on_channel( + dataflow_id: DataflowId, + node_id: &NodeId, + mut channel: DaemonChannel, + clock: Arc, + ) -> eyre::Result { + channel.register(dataflow_id, node_id.clone(), clock.new_timestamp())?; + + Ok(Self { channel, clock }) + } + + pub fn report_outputs_done(&mut self) -> eyre::Result<()> { + let reply = self + .channel + .request(&Timestamped { + inner: DaemonRequest::OutputsDone, + timestamp: self.clock.new_timestamp(), + }) + .wrap_err("failed to report outputs done to dora-daemon")?; + match reply { + dora_core::daemon_messages::DaemonReply::Result(result) => result + .map_err(|e| eyre!(e)) + .wrap_err("failed to report outputs done event to dora-daemon")?, + other => bail!("unexpected outputs done reply: {other:?}"), + } + Ok(()) + } + + pub fn report_closed_outputs(&mut self, outputs: Vec) -> eyre::Result<()> { + let reply = self + .channel + .request(&Timestamped { + inner: DaemonRequest::CloseOutputs(outputs), + timestamp: self.clock.new_timestamp(), + }) + .wrap_err("failed to report closed outputs to dora-daemon")?; + match reply { + dora_core::daemon_messages::DaemonReply::Result(result) => result + .map_err(|e| eyre!(e)) + .wrap_err("failed to receive closed outputs reply from dora-daemon")?, + other => bail!("unexpected closed outputs reply: {other:?}"), + } + Ok(()) + } + + pub fn send_message( + &mut self, + output_id: DataId, + metadata: Metadata, + data: 
Option, + ) -> eyre::Result<()> { + let request = DaemonRequest::SendMessage { + output_id, + metadata, + data, + }; + let reply = self + .channel + .request(&Timestamped { + inner: request, + timestamp: self.clock.new_timestamp(), + }) + .wrap_err("failed to send SendMessage request to dora-daemon")?; + match reply { + dora_core::daemon_messages::DaemonReply::Empty => Ok(()), + other => bail!("unexpected SendMessage reply: {other:?}"), + } + } +} diff --git a/apis/rust/node/src/node/drop_stream.rs b/apis/rust/node/src/node/drop_stream.rs new file mode 100644 index 0000000000000000000000000000000000000000..efe6c796426e8d819b81015feedfe50b43b743ca --- /dev/null +++ b/apis/rust/node/src/node/drop_stream.rs @@ -0,0 +1,178 @@ +use std::{sync::Arc, time::Duration}; + +use crate::daemon_connection::DaemonChannel; +use dora_core::{ + config::NodeId, + daemon_messages::{ + self, DaemonCommunication, DaemonReply, DaemonRequest, DataflowId, DropToken, + NodeDropEvent, Timestamped, + }, + message::uhlc, +}; +use eyre::{eyre, Context}; +use flume::RecvTimeoutError; + +pub struct DropStream { + receiver: flume::Receiver, + _thread_handle: DropStreamThreadHandle, +} + +impl DropStream { + #[tracing::instrument(level = "trace", skip(hlc))] + pub(crate) fn init( + dataflow_id: DataflowId, + node_id: &NodeId, + daemon_communication: &DaemonCommunication, + hlc: Arc, + ) -> eyre::Result { + let channel = match daemon_communication { + DaemonCommunication::Shmem { + daemon_drop_region_id, + .. + } => { + unsafe { DaemonChannel::new_shmem(daemon_drop_region_id) }.wrap_err_with(|| { + format!("failed to create shmem drop stream for node `{node_id}`") + })? + } + DaemonCommunication::Tcp { socket_addr } => DaemonChannel::new_tcp(*socket_addr) + .wrap_err_with(|| format!("failed to connect drop stream for node `{node_id}`"))?, + }; + + Self::init_on_channel(dataflow_id, node_id, channel, hlc) + } + + pub fn init_on_channel( + dataflow_id: DataflowId, + node_id: &NodeId, + mut channel: DaemonChannel, + clock: Arc, + ) -> eyre::Result { + channel.register(dataflow_id, node_id.clone(), clock.new_timestamp())?; + + let reply = channel + .request(&Timestamped { + inner: DaemonRequest::SubscribeDrop, + timestamp: clock.new_timestamp(), + }) + .map_err(|e| eyre!(e)) + .wrap_err("failed to create subscription with dora-daemon")?; + + match reply { + daemon_messages::DaemonReply::Result(Ok(())) => {} + daemon_messages::DaemonReply::Result(Err(err)) => { + eyre::bail!("drop subscribe failed: {err}") + } + other => eyre::bail!("unexpected drop subscribe reply: {other:?}"), + } + + let (tx, rx) = flume::bounded(0); + let node_id_cloned = node_id.clone(); + + let handle = std::thread::spawn(|| drop_stream_loop(node_id_cloned, tx, channel, clock)); + + Ok(Self { + receiver: rx, + _thread_handle: DropStreamThreadHandle::new(node_id.clone(), handle), + }) + } +} + +impl std::ops::Deref for DropStream { + type Target = flume::Receiver; + + fn deref(&self) -> &Self::Target { + &self.receiver + } +} + +#[tracing::instrument(skip(tx, channel, clock))] +fn drop_stream_loop( + node_id: NodeId, + tx: flume::Sender, + mut channel: DaemonChannel, + clock: Arc, +) { + 'outer: loop { + let daemon_request = Timestamped { + inner: DaemonRequest::NextFinishedDropTokens, + timestamp: clock.new_timestamp(), + }; + let events = match channel.request(&daemon_request) { + Ok(DaemonReply::NextDropEvents(events)) => { + if events.is_empty() { + tracing::trace!("drop stream closed for node `{node_id}`"); + break; + } else { + events + } + } + 
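+ // A malformed reply or a failed request is only logged and the loop retries,
+ // so a transient daemon hiccup does not tear down the drop stream: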
Ok(other) => { + let err = eyre!("unexpected drop reply: {other:?}"); + tracing::warn!("{err:?}"); + continue; + } + Err(err) => { + let err = eyre!(err).wrap_err("failed to receive incoming drop event"); + tracing::warn!("{err:?}"); + continue; + } + }; + for Timestamped { inner, timestamp } in events { + if let Err(err) = clock.update_with_timestamp(×tamp) { + tracing::warn!("failed to update HLC: {err}"); + } + match inner { + NodeDropEvent::OutputDropped { drop_token } => { + if tx.send(drop_token).is_err() { + tracing::warn!( + "drop channel was closed already, could not forward \ + drop token`{drop_token:?}`" + ); + break 'outer; + } + } + } + } + } +} + +struct DropStreamThreadHandle { + node_id: NodeId, + handle: flume::Receiver>, +} + +impl DropStreamThreadHandle { + fn new(node_id: NodeId, join_handle: std::thread::JoinHandle<()>) -> Self { + let (tx, rx) = flume::bounded(1); + std::thread::spawn(move || { + let _ = tx.send(join_handle.join()); + }); + Self { + node_id, + handle: rx, + } + } +} + +impl Drop for DropStreamThreadHandle { + #[tracing::instrument(skip(self), fields(node_id = %self.node_id))] + fn drop(&mut self) { + if self.handle.is_empty() { + tracing::trace!("waiting for drop stream thread"); + } + match self.handle.recv_timeout(Duration::from_secs(2)) { + Ok(Ok(())) => { + tracing::trace!("drop stream thread done"); + } + Ok(Err(_)) => { + tracing::error!("drop stream thread panicked"); + } + Err(RecvTimeoutError::Timeout) => { + tracing::warn!("timeout while waiting for drop stream thread"); + } + Err(RecvTimeoutError::Disconnected) => { + tracing::warn!("drop stream thread result channel closed unexpectedly"); + } + } + } +} diff --git a/apis/rust/node/src/node/mod.rs b/apis/rust/node/src/node/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..2b3e4e176c138d98a50352a8871d24799c9b3286 --- /dev/null +++ b/apis/rust/node/src/node/mod.rs @@ -0,0 +1,475 @@ +use crate::EventStream; + +use self::{ + arrow_utils::{copy_array_into_sample, required_data_size}, + control_channel::ControlChannel, + drop_stream::DropStream, +}; +use aligned_vec::{AVec, ConstAlign}; +use arrow::array::Array; +use dora_core::{ + config::{DataId, NodeId, NodeRunConfig}, + daemon_messages::{DataMessage, DataflowId, DropToken, NodeConfig}, + descriptor::Descriptor, + message::{uhlc, ArrowTypeInfo, Metadata, MetadataParameters}, +}; +use eyre::{bail, WrapErr}; +use shared_memory_extended::{Shmem, ShmemConf}; +use std::{ + collections::{HashMap, VecDeque}, + ops::{Deref, DerefMut}, + sync::Arc, + time::Duration, +}; + +#[cfg(feature = "tracing")] +use dora_tracing::set_up_tracing; + +pub mod arrow_utils; +mod control_channel; +mod drop_stream; + +pub const ZERO_COPY_THRESHOLD: usize = 4096; + +pub struct DoraNode { + id: NodeId, + dataflow_id: DataflowId, + node_config: NodeRunConfig, + control_channel: ControlChannel, + clock: Arc, + + sent_out_shared_memory: HashMap, + drop_stream: DropStream, + cache: VecDeque, + + dataflow_descriptor: Descriptor, +} + +impl DoraNode { + /// Initiate a node from environment variables set by `dora-coordinator` + /// + /// ```no_run + /// use dora_node_api::DoraNode; + /// + /// let (mut node, mut events) = DoraNode::init_from_env().expect("Could not init node."); + /// ``` + /// + pub fn init_from_env() -> eyre::Result<(Self, EventStream)> { + let node_config: NodeConfig = { + let raw = std::env::var("DORA_NODE_CONFIG") + .wrap_err("env variable DORA_NODE_CONFIG must be set")?; + serde_yaml::from_str(&raw).context("failed to deserialize 
operator config")? + }; + #[cfg(feature = "tracing")] + set_up_tracing(&node_config.node_id.to_string()) + .context("failed to set up tracing subscriber")?; + Self::init(node_config) + } + + #[tracing::instrument] + pub fn init(node_config: NodeConfig) -> eyre::Result<(Self, EventStream)> { + let NodeConfig { + dataflow_id, + node_id, + run_config, + daemon_communication, + dataflow_descriptor, + } = node_config; + + let clock = Arc::new(uhlc::HLC::default()); + + let event_stream = + EventStream::init(dataflow_id, &node_id, &daemon_communication, clock.clone()) + .wrap_err("failed to init event stream")?; + let drop_stream = + DropStream::init(dataflow_id, &node_id, &daemon_communication, clock.clone()) + .wrap_err("failed to init drop stream")?; + let control_channel = + ControlChannel::init(dataflow_id, &node_id, &daemon_communication, clock.clone()) + .wrap_err("failed to init control channel")?; + + let node = Self { + id: node_id, + dataflow_id, + node_config: run_config, + control_channel, + clock, + sent_out_shared_memory: HashMap::new(), + drop_stream, + cache: VecDeque::new(), + + dataflow_descriptor, + }; + Ok((node, event_stream)) + } + + /// Send data from the node to the other nodes. + /// We take a closure as an input to enable zero copy on send. + /// + /// ```no_run + /// use dora_node_api::{DoraNode, MetadataParameters}; + /// use dora_core::config::DataId; + /// + /// let (mut node, mut events) = DoraNode::init_from_env().expect("Could not init node."); + /// + /// let output = DataId::from("output_id".to_owned()); + /// + /// let data: &[u8] = &[0, 1, 2, 3]; + /// let parameters = MetadataParameters::default(); + /// + /// node.send_output_raw( + /// output, + /// parameters, + /// data.len(), + /// |out| { + /// out.copy_from_slice(data); + /// }).expect("Could not send output"); + /// ``` + /// + pub fn send_output_raw( + &mut self, + output_id: DataId, + parameters: MetadataParameters, + data_len: usize, + data: F, + ) -> eyre::Result<()> + where + F: FnOnce(&mut [u8]), + { + let mut sample = self.allocate_data_sample(data_len)?; + data(&mut sample); + + let type_info = ArrowTypeInfo::byte_array(data_len); + + self.send_output_sample(output_id, type_info, parameters, Some(sample)) + } + + pub fn send_output( + &mut self, + output_id: DataId, + parameters: MetadataParameters, + data: impl Array, + ) -> eyre::Result<()> { + let arrow_array = data.to_data(); + + let total_len = required_data_size(&arrow_array); + + let mut sample = self.allocate_data_sample(total_len)?; + let type_info = copy_array_into_sample(&mut sample, &arrow_array); + + self.send_output_sample(output_id, type_info, parameters, Some(sample)) + .wrap_err("failed to send output")?; + + Ok(()) + } + + pub fn send_output_bytes( + &mut self, + output_id: DataId, + parameters: MetadataParameters, + data_len: usize, + data: &[u8], + ) -> eyre::Result<()> { + self.send_output_raw(output_id, parameters, data_len, |sample| { + sample.copy_from_slice(data) + }) + } + + pub fn send_typed_output( + &mut self, + output_id: DataId, + type_info: ArrowTypeInfo, + parameters: MetadataParameters, + data_len: usize, + data: F, + ) -> eyre::Result<()> + where + F: FnOnce(&mut [u8]), + { + let mut sample = self.allocate_data_sample(data_len)?; + data(&mut sample); + + self.send_output_sample(output_id, type_info, parameters, Some(sample)) + } + + pub fn send_output_sample( + &mut self, + output_id: DataId, + type_info: ArrowTypeInfo, + parameters: MetadataParameters, + sample: Option, + ) -> eyre::Result<()> { + 
self.handle_finished_drop_tokens()?; + + if !self.node_config.outputs.contains(&output_id) { + eyre::bail!("unknown output"); + } + let metadata = Metadata::from_parameters( + self.clock.new_timestamp(), + type_info, + parameters.into_owned(), + ); + + let (data, shmem) = match sample { + Some(sample) => sample.finalize(), + None => (None, None), + }; + + self.control_channel + .send_message(output_id.clone(), metadata, data) + .wrap_err_with(|| format!("failed to send output {output_id}"))?; + + if let Some((shared_memory, drop_token)) = shmem { + self.sent_out_shared_memory + .insert(drop_token, shared_memory); + } + + Ok(()) + } + + pub fn close_outputs(&mut self, outputs: Vec) -> eyre::Result<()> { + for output_id in &outputs { + if !self.node_config.outputs.remove(output_id) { + eyre::bail!("unknown output {output_id}"); + } + } + + self.control_channel + .report_closed_outputs(outputs) + .wrap_err("failed to report closed outputs to daemon")?; + + Ok(()) + } + + pub fn id(&self) -> &NodeId { + &self.id + } + + pub fn dataflow_id(&self) -> &DataflowId { + &self.dataflow_id + } + + pub fn node_config(&self) -> &NodeRunConfig { + &self.node_config + } + + pub fn allocate_data_sample(&mut self, data_len: usize) -> eyre::Result { + let data = if data_len >= ZERO_COPY_THRESHOLD { + // create shared memory region + let shared_memory = self.allocate_shared_memory(data_len)?; + + DataSample { + inner: DataSampleInner::Shmem(shared_memory), + len: data_len, + } + } else { + let avec: AVec> = AVec::__from_elem(128, 0, data_len); + + avec.into() + }; + + Ok(data) + } + + fn allocate_shared_memory(&mut self, data_len: usize) -> eyre::Result { + let cache_index = self + .cache + .iter() + .enumerate() + .rev() + .filter(|(_, s)| s.len() >= data_len) + .min_by_key(|(_, s)| s.len()) + .map(|(i, _)| i); + let memory = match cache_index { + Some(i) => { + // we know that this index exists, so we can safely unwrap here + self.cache.remove(i).unwrap() + } + None => ShmemHandle(Box::new( + ShmemConf::new() + .size(data_len) + .writable(true) + .create() + .wrap_err("failed to allocate shared memory")?, + )), + }; + assert!(memory.len() >= data_len); + + Ok(memory) + } + + fn handle_finished_drop_tokens(&mut self) -> eyre::Result<()> { + loop { + match self.drop_stream.try_recv() { + Ok(token) => match self.sent_out_shared_memory.remove(&token) { + Some(region) => self.add_to_cache(region), + None => tracing::warn!("received unknown finished drop token `{token:?}`"), + }, + Err(flume::TryRecvError::Empty) => break, + Err(flume::TryRecvError::Disconnected) => { + bail!("event stream was closed before sending all expected drop tokens") + } + } + } + Ok(()) + } + + fn add_to_cache(&mut self, memory: ShmemHandle) { + const MAX_CACHE_SIZE: usize = 20; + + self.cache.push_back(memory); + while self.cache.len() > MAX_CACHE_SIZE { + self.cache.pop_front(); + } + } + + /// Returns the full dataflow descriptor that this node is part of. + /// + /// This method returns the parsed dataflow YAML file. 
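+ ///
+ /// It can be used, for example, to inspect the nodes, operators, and inputs/outputs
+ /// that are declared in the dataflow this node is running in.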
+ pub fn dataflow_descriptor(&self) -> &Descriptor { + &self.dataflow_descriptor + } +} + +impl Drop for DoraNode { + #[tracing::instrument(skip(self), fields(self.id = %self.id), level = "trace")] + fn drop(&mut self) { + // close all outputs first to notify subscribers as early as possible + if let Err(err) = self + .control_channel + .report_closed_outputs( + std::mem::take(&mut self.node_config.outputs) + .into_iter() + .collect(), + ) + .context("failed to close outputs on drop") + { + tracing::warn!("{err:?}") + } + + while !self.sent_out_shared_memory.is_empty() { + if self.drop_stream.len() == 0 { + tracing::trace!( + "waiting for {} remaining drop tokens", + self.sent_out_shared_memory.len() + ); + } + + match self.drop_stream.recv_timeout(Duration::from_secs(10)) { + Ok(token) => { + self.sent_out_shared_memory.remove(&token); + } + Err(flume::RecvTimeoutError::Disconnected) => { + tracing::warn!( + "finished_drop_tokens channel closed while still waiting for drop tokens; \ + closing {} shared memory regions that might still be used", + self.sent_out_shared_memory.len() + ); + break; + } + Err(flume::RecvTimeoutError::Timeout) => { + tracing::warn!( + "timeout while waiting for drop tokens; \ + closing {} shared memory regions that might still be used", + self.sent_out_shared_memory.len() + ); + break; + } + } + } + + if let Err(err) = self.control_channel.report_outputs_done() { + tracing::warn!("{err:?}") + } + } +} + +pub struct DataSample { + inner: DataSampleInner, + len: usize, +} + +impl DataSample { + fn finalize(self) -> (Option, Option<(ShmemHandle, DropToken)>) { + match self.inner { + DataSampleInner::Shmem(shared_memory) => { + let drop_token = DropToken::generate(); + let data = DataMessage::SharedMemory { + shared_memory_id: shared_memory.get_os_id().to_owned(), + len: self.len, + drop_token, + }; + (Some(data), Some((shared_memory, drop_token))) + } + DataSampleInner::Vec(buffer) => (Some(DataMessage::Vec(buffer)), None), + } + } +} + +impl Deref for DataSample { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + let slice = match &self.inner { + DataSampleInner::Shmem(handle) => unsafe { handle.as_slice() }, + DataSampleInner::Vec(data) => data, + }; + &slice[..self.len] + } +} + +impl DerefMut for DataSample { + fn deref_mut(&mut self) -> &mut Self::Target { + let slice = match &mut self.inner { + DataSampleInner::Shmem(handle) => unsafe { handle.as_slice_mut() }, + DataSampleInner::Vec(data) => data, + }; + &mut slice[..self.len] + } +} + +impl From>> for DataSample { + fn from(value: AVec>) -> Self { + Self { + len: value.len(), + inner: DataSampleInner::Vec(value), + } + } +} + +impl std::fmt::Debug for DataSample { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let kind = match &self.inner { + DataSampleInner::Shmem(_) => "SharedMemory", + DataSampleInner::Vec(_) => "Vec", + }; + f.debug_struct("DataSample") + .field("len", &self.len) + .field("kind", &kind) + .finish_non_exhaustive() + } +} + +enum DataSampleInner { + Shmem(ShmemHandle), + Vec(AVec>), +} + +struct ShmemHandle(Box); + +impl Deref for ShmemHandle { + type Target = Shmem; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for ShmemHandle { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +unsafe impl Send for ShmemHandle {} +unsafe impl Sync for ShmemHandle {} diff --git a/apis/rust/operator/Cargo.toml b/apis/rust/operator/Cargo.toml new file mode 100644 index 
0000000000000000000000000000000000000000..6391df393af798df3e46dc02b4d0b4ac9587ca2c --- /dev/null +++ b/apis/rust/operator/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "dora-operator-api" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +dora-operator-api-macros = { workspace = true } +dora-operator-api-types = { workspace = true } +dora-arrow-convert = { workspace = true } diff --git a/apis/rust/operator/macros/Cargo.toml b/apis/rust/operator/macros/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..4e4c6593283329c248783f0d42771038e0b8e9e2 --- /dev/null +++ b/apis/rust/operator/macros/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "dora-operator-api-macros" +version.workspace = true +edition = "2021" +description = "Rust API Macros for Dora Operator" +documentation.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[lib] +proc-macro = true + +[dependencies] +syn = { version = "1.0.81", features = ["full"] } +quote = "1.0.10" +proc-macro2 = "1.0.32" diff --git a/apis/rust/operator/macros/src/lib.rs b/apis/rust/operator/macros/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..8b5384f675075301cb36011ea2b0a4a7c08faf37 --- /dev/null +++ b/apis/rust/operator/macros/src/lib.rs @@ -0,0 +1,74 @@ +use proc_macro::TokenStream; +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; + +extern crate proc_macro; + +#[proc_macro] +pub fn register_operator(item: TokenStream) -> TokenStream { + // convert from `TokenStream` to `TokenStream2`, which is used by the + // `syn` crate + let item = TokenStream2::from(item); + // generate the dora wrapper functions + let generated = register_operator_impl(&item).unwrap_or_else(|err| err.to_compile_error()); + // output the generated functions + let tokens = quote! { + #generated + }; + // convert the type back from `TokenStream2` to `TokenStream` + tokens.into() +} + +/// Generates the wrapper functions for the annotated function. +fn register_operator_impl(item: &TokenStream2) -> syn::Result { + // parse the type given to the `register_operator` macro + let operator_ty: syn::TypePath = syn::parse2(item.clone()) + .map_err(|e| syn::Error::new(e.span(), "expected type as argument"))?; + + let init = quote! { + #[no_mangle] + pub unsafe extern "C" fn dora_init_operator() -> dora_operator_api::types::DoraInitResult { + dora_operator_api::raw::dora_init_operator::<#operator_ty>() + } + + const _DORA_INIT_OPERATOR: dora_operator_api::types::DoraInitOperator = dora_operator_api::types::DoraInitOperator { + init_operator: dora_init_operator, + }; + }; + + let drop = quote! { + #[no_mangle] + pub unsafe extern "C" fn dora_drop_operator(operator_context: *mut std::ffi::c_void) + -> dora_operator_api::types::DoraResult + { + dora_operator_api::raw::dora_drop_operator::<#operator_ty>(operator_context) + } + + const _DORA_DROP_OPERATOR: dora_operator_api::types::DoraDropOperator = dora_operator_api::types::DoraDropOperator { + drop_operator: dora_drop_operator, + }; + }; + + let on_event = quote! 
{ + #[no_mangle] + pub unsafe extern "C" fn dora_on_event( + event: &mut dora_operator_api::types::RawEvent, + send_output: &dora_operator_api::types::SendOutput, + operator_context: *mut std::ffi::c_void, + ) -> dora_operator_api::types::OnEventResult { + dora_operator_api::raw::dora_on_event::<#operator_ty>( + event, send_output, operator_context + ) + } + + const _DORA_ON_EVENT: dora_operator_api::types::DoraOnEvent = dora_operator_api::types::DoraOnEvent { + on_event: dora_operator_api::types::OnEventFn(dora_on_event), + }; + }; + + Ok(quote! { + #init + #drop + #on_event + }) +} diff --git a/apis/rust/operator/src/lib.rs b/apis/rust/operator/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..262fcdd3f11ac289bff7f9dc38669301e83bb823 --- /dev/null +++ b/apis/rust/operator/src/lib.rs @@ -0,0 +1,69 @@ +//! The operator API is a framework to implement dora operators. +//! The implemented operator will be managed by `dora`. +//! +//! This framework enable us to make optimisation and provide advanced features. +//! It is the recommended way of using `dora`. +//! +//! An operator requires to be registered and implement the `DoraOperator` trait. +//! It is composed of an `on_event` method that defines the behaviour +//! of the operator when there is an event such as receiving an input for example. +//! +//! Try it out with: +//! +//! ```bash +//! dora new op --kind operator +//! ``` +//! + +#![warn(unsafe_op_in_unsafe_fn)] +#![allow(clippy::missing_safety_doc)] + +pub use dora_arrow_convert::*; +pub use dora_operator_api_macros::register_operator; +pub use dora_operator_api_types as types; +pub use types::DoraStatus; +use types::{ + arrow::{self, array::Array}, + Metadata, Output, SendOutput, +}; + +pub mod raw; + +#[derive(Debug)] +#[non_exhaustive] +pub enum Event<'a> { + Input { id: &'a str, data: ArrowData }, + InputParseError { id: &'a str, error: String }, + InputClosed { id: &'a str }, + Stop, +} + +pub trait DoraOperator: Default { + #[allow(clippy::result_unit_err)] // we use a () error type only for testing + fn on_event( + &mut self, + event: &Event, + output_sender: &mut DoraOutputSender, + ) -> Result; +} + +pub struct DoraOutputSender<'a>(&'a SendOutput); + +impl DoraOutputSender<'_> { + /// Send an output from the operator: + /// - `id` is the `output_id` as defined in your dataflow. 
+ /// - `data` is the data that should be sent + pub fn send(&mut self, id: String, data: impl Array) -> Result<(), String> { + let (data_array, schema) = + arrow::ffi::to_ffi(&data.into_data()).map_err(|err| err.to_string())?; + let result = self.0.send_output.call(Output { + id: id.into(), + data_array, + schema, + metadata: Metadata { + open_telemetry_context: String::new().into(), // TODO + }, + }); + result.into_result() + } +} diff --git a/apis/rust/operator/src/raw.rs b/apis/rust/operator/src/raw.rs new file mode 100644 index 0000000000000000000000000000000000000000..6634e68fb71f17d21ff10b46a9c88173a302779d --- /dev/null +++ b/apis/rust/operator/src/raw.rs @@ -0,0 +1,80 @@ +use crate::{DoraOperator, DoraOutputSender, DoraStatus, Event}; +use dora_operator_api_types::{ + arrow, DoraInitResult, DoraResult, OnEventResult, RawEvent, SendOutput, +}; +use std::ffi::c_void; + +pub type OutputFnRaw = unsafe extern "C" fn( + id_start: *const u8, + id_len: usize, + data_start: *const u8, + data_len: usize, + output_context: *const c_void, +) -> isize; + +pub unsafe fn dora_init_operator() -> DoraInitResult { + let operator: O = Default::default(); + let ptr: *mut O = Box::leak(Box::new(operator)); + let operator_context: *mut c_void = ptr.cast(); + DoraInitResult { + result: DoraResult { error: None }, + operator_context, + } +} + +pub unsafe fn dora_drop_operator(operator_context: *mut c_void) -> DoraResult { + let raw: *mut O = operator_context.cast(); + drop(unsafe { Box::from_raw(raw) }); + DoraResult { error: None } +} + +pub unsafe fn dora_on_event( + event: &mut RawEvent, + send_output: &SendOutput, + operator_context: *mut std::ffi::c_void, +) -> OnEventResult { + let mut output_sender = DoraOutputSender(send_output); + + let operator: &mut O = unsafe { &mut *operator_context.cast() }; + + let event_variant = if let Some(input) = &mut event.input { + let Some(data_array) = input.data_array.take() else { + return OnEventResult { + result: DoraResult::from_error("data already taken".to_string()), + status: DoraStatus::Continue, + }; + }; + let data = unsafe { arrow::ffi::from_ffi(data_array, &input.schema) }; + + match data { + Ok(data) => Event::Input { + id: &input.id, + data: arrow::array::make_array(data).into(), + }, + Err(err) => Event::InputParseError { + id: &input.id, + error: format!("{err}"), + }, + } + } else if let Some(input_id) = &event.input_closed { + Event::InputClosed { id: input_id } + } else if event.stop { + Event::Stop + } else { + // ignore unknown events + return OnEventResult { + result: DoraResult { error: None }, + status: DoraStatus::Continue, + }; + }; + match operator.on_event(&event_variant, &mut output_sender) { + Ok(status) => OnEventResult { + result: DoraResult { error: None }, + status, + }, + Err(error) => OnEventResult { + result: DoraResult::from_error(error), + status: DoraStatus::Stop, + }, + } +} diff --git a/apis/rust/operator/types/Cargo.toml b/apis/rust/operator/types/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..bfc281c4ebb2521e0b1ca719cde4d2a9dfc25140 --- /dev/null +++ b/apis/rust/operator/types/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "dora-operator-api-types" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +arrow = { workspace = true, features = ["ffi"] } +dora-arrow-convert = { workspace = true } + 
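Putting the macro, the `DoraOperator` trait, and the raw FFI glue above together, an operator crate (built as a `cdylib` so the runtime can load it) might look like the following sketch; the `count` output id is illustrative and must be declared for the operator in the dataflow YAML.

```rust
use dora_operator_api::types::arrow::array::UInt8Array;
use dora_operator_api::{register_operator, DoraOperator, DoraOutputSender, DoraStatus, Event};

register_operator!(CounterOperator);

#[derive(Default)]
struct CounterOperator {
    count: u8,
}

impl DoraOperator for CounterOperator {
    fn on_event(
        &mut self,
        event: &Event,
        output_sender: &mut DoraOutputSender,
    ) -> Result<DoraStatus, String> {
        match event {
            Event::Input { .. } => {
                self.count = self.count.wrapping_add(1);
                // forward the running count on the `count` output
                output_sender.send("count".into(), UInt8Array::from(vec![self.count]))?;
            }
            Event::Stop => return Ok(DoraStatus::Stop),
            _ => {}
        }
        Ok(DoraStatus::Continue)
    }
}
```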
+[dependencies.safer-ffi] +version = "0.1.4" +features = ["headers", "inventory-0-3-1"] diff --git a/apis/rust/operator/types/src/lib.rs b/apis/rust/operator/types/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..d235114dd1a967e31a1628a6a5e3a5673ed58aab --- /dev/null +++ b/apis/rust/operator/types/src/lib.rs @@ -0,0 +1,212 @@ +#![deny(elided_lifetimes_in_paths)] // required for safer-ffi +#![allow(improper_ctypes_definitions)] +#![allow(clippy::missing_safety_doc)] + +pub use arrow; +use dora_arrow_convert::{ArrowData, IntoArrow}; +pub use safer_ffi; + +use arrow::{ + array::Array, + ffi::{FFI_ArrowArray, FFI_ArrowSchema}, +}; +use core::slice; +use safer_ffi::{ + char_p::{self, char_p_boxed}, + closure::ArcDynFn1, + derive_ReprC, ffi_export, +}; +use std::{ops::Deref, path::Path}; + +#[derive_ReprC] +#[ffi_export] +#[repr(C)] +pub struct DoraInitOperator { + pub init_operator: unsafe extern "C" fn() -> DoraInitResult, +} + +#[derive_ReprC] +#[ffi_export] +#[repr(C)] +#[derive(Debug)] +pub struct DoraInitResult { + pub result: DoraResult, + pub operator_context: *mut std::ffi::c_void, +} +#[derive_ReprC] +#[ffi_export] +#[repr(C)] +pub struct DoraDropOperator { + pub drop_operator: unsafe extern "C" fn(operator_context: *mut std::ffi::c_void) -> DoraResult, +} + +#[derive_ReprC] +#[ffi_export] +#[repr(C)] +#[derive(Debug)] +pub struct DoraResult { + pub error: Option>, +} + +impl DoraResult { + pub const SUCCESS: Self = Self { error: None }; + + pub fn from_error(error: String) -> Self { + Self { + error: Some(Box::new(safer_ffi::String::from(error)).into()), + } + } + + pub fn error(&self) -> Option<&str> { + self.error.as_deref().map(|s| s.deref()) + } + + pub fn into_result(self) -> Result<(), String> { + match self.error { + None => Ok(()), + Some(error) => { + let converted = safer_ffi::boxed::Box_::into(error); + Err((*converted).into()) + } + } + } +} + +#[derive_ReprC] +#[ffi_export] +#[repr(C)] +pub struct DoraOnEvent { + pub on_event: OnEventFn, +} + +#[derive_ReprC] +#[ffi_export] +#[repr(transparent)] +pub struct OnEventFn( + pub unsafe extern "C" fn( + event: &mut RawEvent, + send_output: &SendOutput, + operator_context: *mut std::ffi::c_void, + ) -> OnEventResult, +); + +#[derive_ReprC] +#[ffi_export] +#[repr(C)] +#[derive(Debug)] +pub struct RawEvent { + pub input: Option>, + pub input_closed: Option, + pub stop: bool, + pub error: Option, +} + +#[derive_ReprC] +#[repr(opaque)] +#[derive(Debug)] +pub struct Input { + pub id: safer_ffi::String, + pub data_array: Option, + pub schema: FFI_ArrowSchema, + pub metadata: Metadata, +} + +#[derive_ReprC] +#[ffi_export] +#[repr(C)] +#[derive(Debug)] +pub struct Metadata { + pub open_telemetry_context: safer_ffi::String, +} + +#[derive_ReprC] +#[ffi_export] +#[repr(C)] +pub struct SendOutput { + pub send_output: ArcDynFn1, +} + +#[derive_ReprC] +#[repr(opaque)] +#[derive(Debug)] +pub struct Output { + pub id: safer_ffi::String, + pub data_array: FFI_ArrowArray, + pub schema: FFI_ArrowSchema, + pub metadata: Metadata, +} + +#[derive_ReprC] +#[ffi_export] +#[repr(C)] +#[derive(Debug)] +pub struct OnEventResult { + pub result: DoraResult, + pub status: DoraStatus, +} + +#[derive_ReprC] +#[ffi_export] +#[derive(Debug)] +#[repr(u8)] +pub enum DoraStatus { + Continue = 0, + Stop = 1, + StopAll = 2, +} + +#[ffi_export] +pub fn dora_read_input_id(input: &Input) -> char_p_boxed { + char_p::new(&*input.id) +} + +#[ffi_export] +pub fn dora_free_input_id(_input_id: char_p_boxed) {} + +#[ffi_export] +pub fn 
dora_read_data(input: &mut Input) -> Option> { + let data_array = input.data_array.take()?; + let data = unsafe { arrow::ffi::from_ffi(data_array, &input.schema).ok()? }; + let array = ArrowData(arrow::array::make_array(data)); + let bytes: &[u8] = TryFrom::try_from(&array).ok()?; + Some(bytes.to_owned().into()) +} + +#[ffi_export] +pub fn dora_free_data(_data: safer_ffi::Vec) {} + +#[ffi_export] +pub unsafe fn dora_send_operator_output( + send_output: &SendOutput, + id: safer_ffi::char_p::char_p_ref<'_>, + data_ptr: *const u8, + data_len: usize, +) -> DoraResult { + let result = || { + let data = unsafe { slice::from_raw_parts(data_ptr, data_len) }; + let arrow_data = data.to_owned().into_arrow(); + let (data_array, schema) = + arrow::ffi::to_ffi(&arrow_data.into_data()).map_err(|err| err.to_string())?; + let output = Output { + id: id.to_str().to_owned().into(), + data_array, + schema, + metadata: Metadata { + open_telemetry_context: String::new().into(), // TODO + }, + }; + Result::<_, String>::Ok(output) + }; + match result() { + Ok(output) => send_output.send_output.call(output), + Err(error) => DoraResult { + error: Some(Box::new(safer_ffi::String::from(error)).into()), + }, + } +} + +pub fn generate_headers(target_file: &Path) -> ::std::io::Result<()> { + ::safer_ffi::headers::builder() + .to_file(target_file)? + .generate() +} diff --git a/binaries/cli/Cargo.toml b/binaries/cli/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..15ee04248c922f3c90b91263465f88c9de45e1c1 --- /dev/null +++ b/binaries/cli/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "dora-cli" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[[bin]] +name = "dora" +path = "src/main.rs" + +[features] +default = ["tracing"] +tracing = ["dep:dora-tracing"] + +[dependencies] +clap = { version = "4.0.3", features = ["derive"] } +eyre = "0.6.8" +dora-core = { workspace = true } +dora-node-api-c = { workspace = true } +dora-operator-api-c = { workspace = true } +serde = { version = "1.0.136", features = ["derive"] } +serde_yaml = "0.9.11" +webbrowser = "0.8.3" +serde_json = "1.0.86" +termcolor = "1.1.3" +uuid = { version = "1.7", features = ["v7", "serde"] } +inquire = "0.5.2" +communication-layer-request-reply = { workspace = true } +notify = "5.1.0" +ctrlc = "3.2.5" +tracing = "0.1.36" +dora-tracing = { workspace = true, optional = true } +bat = "0.24.0" +dora-daemon = { workspace = true } +dora-coordinator = { workspace = true } +dora-runtime = { workspace = true } +tokio = { version = "1.20.1", features = ["full"] } +tokio-stream = { version = "0.1.8", features = ["io-util", "net"] } +futures = "0.3.21" +duration-str = "0.5" diff --git a/binaries/cli/src/attach.rs b/binaries/cli/src/attach.rs new file mode 100644 index 0000000000000000000000000000000000000000..62745e149cc020e78ddc20e1a5bfb6458c925a03 --- /dev/null +++ b/binaries/cli/src/attach.rs @@ -0,0 +1,146 @@ +use communication_layer_request_reply::TcpRequestReplyConnection; +use dora_core::{ + descriptor::{resolve_path, CoreNodeKind, Descriptor}, + topics::{ControlRequest, ControlRequestReply}, +}; +use eyre::Context; +use notify::event::ModifyKind; +use notify::{Config, Event as NotifyEvent, EventKind, RecommendedWatcher, RecursiveMode, Watcher}; +use std::collections::HashMap; +use std::{path::PathBuf, sync::mpsc, time::Duration}; +use tracing::{error, 
info}; +use uuid::Uuid; + +pub fn attach_dataflow( + dataflow: Descriptor, + dataflow_path: PathBuf, + dataflow_id: Uuid, + session: &mut TcpRequestReplyConnection, + hot_reload: bool, +) -> Result<(), eyre::ErrReport> { + let (tx, rx) = mpsc::sync_channel(2); + + // Generate path hashmap + let mut node_path_lookup = HashMap::new(); + + let nodes = dataflow.resolve_aliases_and_set_defaults()?; + + let working_dir = dataflow_path + .canonicalize() + .context("failed to canoncialize dataflow path")? + .parent() + .ok_or_else(|| eyre::eyre!("canonicalized dataflow path has no parent"))? + .to_owned(); + + for node in nodes { + match node.kind { + // Reloading Custom Nodes is not supported. See: https://github.com/dora-rs/dora/pull/239#discussion_r1154313139 + CoreNodeKind::Custom(_cn) => (), + CoreNodeKind::Runtime(rn) => { + for op in rn.operators.iter() { + if let dora_core::descriptor::OperatorSource::Python(python_source) = + &op.config.source + { + let path = resolve_path(&python_source.source, &working_dir) + .wrap_err_with(|| { + format!("failed to resolve node source `{}`", python_source.source) + })?; + node_path_lookup + .insert(path, (dataflow_id, node.id.clone(), Some(op.id.clone()))); + } + // Reloading non-python operator is not supported. See: https://github.com/dora-rs/dora/pull/239#discussion_r1154313139 + } + } + } + } + + // Setup dataflow file watcher if reload option is set. + let watcher_tx = tx.clone(); + let _watcher = if hot_reload { + let hash = node_path_lookup.clone(); + let paths = hash.keys(); + let notifier = move |event| { + if let Ok(NotifyEvent { + paths, + kind: EventKind::Modify(ModifyKind::Data(_data)), + .. + }) = event + { + for path in paths { + if let Some((dataflow_id, node_id, operator_id)) = node_path_lookup.get(&path) { + watcher_tx + .send(ControlRequest::Reload { + dataflow_id: *dataflow_id, + node_id: node_id.clone(), + operator_id: operator_id.clone(), + }) + .context("Could not send reload request to the cli loop") + .unwrap(); + } + } + // TODO: Manage different file event + } + }; + + let mut watcher = RecommendedWatcher::new( + notifier, + Config::default().with_poll_interval(Duration::from_secs(1)), + )?; + + for path in paths { + watcher.watch(path, RecursiveMode::Recursive)?; + } + Some(watcher) + } else { + None + }; + + // Setup Ctrlc Watcher to stop dataflow after ctrlc + let ctrlc_tx = tx; + let mut ctrlc_sent = false; + ctrlc::set_handler(move || { + if ctrlc_sent { + std::process::abort(); + } else { + if ctrlc_tx + .send(ControlRequest::Stop { + dataflow_uuid: dataflow_id, + grace_duration: None, + }) + .is_err() + { + // bail!("failed to report ctrl-c event to dora-daemon"); + } + ctrlc_sent = true; + } + }) + .wrap_err("failed to set ctrl-c handler")?; + + loop { + let control_request = match rx.recv_timeout(Duration::from_secs(1)) { + Err(_err) => ControlRequest::Check { + dataflow_uuid: dataflow_id, + }, + Ok(reload_event) => reload_event, + }; + + let reply_raw = session + .request(&serde_json::to_vec(&control_request)?) 
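+ // `control_request` is either a reload/stop event forwarded from the watcher and
+ // ctrl-c handlers above, or the periodic `Check` heartbeat that lets this loop
+ // notice when the dataflow has stopped on its own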
+ .wrap_err("failed to send request message to coordinator")?; + let result: ControlRequestReply = + serde_json::from_slice(&reply_raw).wrap_err("failed to parse reply")?; + match result { + ControlRequestReply::DataflowStarted { uuid: _ } => (), + ControlRequestReply::DataflowStopped { uuid, result } => { + info!("dataflow {uuid} stopped"); + break result + .map_err(|err| eyre::eyre!(err)) + .wrap_err("dataflow failed"); + } + ControlRequestReply::DataflowReloaded { uuid } => { + info!("dataflow {uuid} reloaded") + } + other => error!("Received unexpected Coordinator Reply: {:#?}", other), + }; + } +} diff --git a/binaries/cli/src/build.rs b/binaries/cli/src/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..496402a811782df50c227fa58d2a51ef031c730d --- /dev/null +++ b/binaries/cli/src/build.rs @@ -0,0 +1,81 @@ +use dora_core::{ + config::OperatorId, + descriptor::{Descriptor, SINGLE_OPERATOR_DEFAULT_ID}, +}; +use eyre::{eyre, Context}; +use std::{path::Path, process::Command}; + +pub fn build(dataflow: &Path) -> eyre::Result<()> { + let descriptor = Descriptor::blocking_read(dataflow)?; + let dataflow_absolute = if dataflow.is_relative() { + std::env::current_dir().unwrap().join(dataflow) + } else { + dataflow.to_owned() + }; + let working_dir = dataflow_absolute.parent().unwrap(); + + let default_op_id = OperatorId::from(SINGLE_OPERATOR_DEFAULT_ID.to_string()); + + for node in descriptor.nodes { + match node.kind()? { + dora_core::descriptor::NodeKind::Standard(_) => { + run_build_command(node.build.as_deref(), working_dir).with_context(|| { + format!("build command failed for standard node `{}`", node.id) + })? + } + dora_core::descriptor::NodeKind::Runtime(runtime_node) => { + for operator in &runtime_node.operators { + run_build_command(operator.config.build.as_deref(), working_dir).with_context( + || { + format!( + "build command failed for operator `{}/{}`", + node.id, operator.id + ) + }, + )?; + } + } + dora_core::descriptor::NodeKind::Custom(custom_node) => { + run_build_command(custom_node.build.as_deref(), working_dir).with_context(|| { + format!("build command failed for custom node `{}`", node.id) + })? + } + dora_core::descriptor::NodeKind::Operator(operator) => { + run_build_command(operator.config.build.as_deref(), working_dir).with_context( + || { + format!( + "build command failed for operator `{}/{}`", + node.id, + operator.id.as_ref().unwrap_or(&default_op_id) + ) + }, + )? 
+ } + } + } + + Ok(()) +} + +fn run_build_command(build: Option<&str>, working_dir: &Path) -> eyre::Result<()> { + if let Some(build) = build { + let mut split = build.split_whitespace(); + let mut cmd = Command::new( + split + .next() + .ok_or_else(|| eyre!("build command is empty"))?, + ); + cmd.args(split); + cmd.current_dir(working_dir); + let exit_status = cmd + .status() + .wrap_err_with(|| format!("failed to run `{}`", build))?; + if exit_status.success() { + Ok(()) + } else { + Err(eyre!("build command returned an error code")) + } + } else { + Ok(()) + } +} diff --git a/binaries/cli/src/check.rs b/binaries/cli/src/check.rs new file mode 100644 index 0000000000000000000000000000000000000000..49bc948e3806310b783d45ff7b15d1c2b9bf5cd8 --- /dev/null +++ b/binaries/cli/src/check.rs @@ -0,0 +1,77 @@ +use crate::connect_to_coordinator; +use communication_layer_request_reply::TcpRequestReplyConnection; +use dora_core::topics::{ControlRequest, ControlRequestReply}; +use eyre::{bail, Context}; +use std::{ + io::{IsTerminal, Write}, + net::SocketAddr, +}; +use termcolor::{Color, ColorChoice, ColorSpec, WriteColor}; + +pub fn check_environment(coordinator_addr: SocketAddr) -> eyre::Result<()> { + let mut error_occurred = false; + + let color_choice = if std::io::stdout().is_terminal() { + ColorChoice::Auto + } else { + ColorChoice::Never + }; + let mut stdout = termcolor::StandardStream::stdout(color_choice); + + // check whether coordinator is running + write!(stdout, "Dora Coordinator: ")?; + let mut session = match connect_to_coordinator(coordinator_addr) { + Ok(session) => { + let _ = stdout.set_color(ColorSpec::new().set_fg(Some(Color::Green))); + writeln!(stdout, "ok")?; + Some(session) + } + Err(_) => { + let _ = stdout.set_color(ColorSpec::new().set_fg(Some(Color::Red))); + writeln!(stdout, "not running")?; + error_occurred = true; + None + } + }; + + let _ = stdout.reset(); + + // check whether daemon is running + write!(stdout, "Dora Daemon: ")?; + if session + .as_deref_mut() + .map(daemon_running) + .transpose()? + .unwrap_or(false) + { + let _ = stdout.set_color(ColorSpec::new().set_fg(Some(Color::Green))); + writeln!(stdout, "ok")?; + } else { + let _ = stdout.set_color(ColorSpec::new().set_fg(Some(Color::Red))); + writeln!(stdout, "not running")?; + error_occurred = true; + } + let _ = stdout.reset(); + + writeln!(stdout)?; + + if error_occurred { + bail!("Environment check failed."); + } + + Ok(()) +} + +pub fn daemon_running(session: &mut TcpRequestReplyConnection) -> Result { + let reply_raw = session + .request(&serde_json::to_vec(&ControlRequest::DaemonConnected).unwrap()) + .wrap_err("failed to send DaemonConnected message")?; + + let reply = serde_json::from_slice(&reply_raw).wrap_err("failed to parse reply")?; + let running = match reply { + ControlRequestReply::DaemonConnected(running) => running, + other => bail!("unexpected reply to daemon connection check: {other:?}"), + }; + + Ok(running) +} diff --git a/binaries/cli/src/graph/mermaid-template.html b/binaries/cli/src/graph/mermaid-template.html new file mode 100644 index 0000000000000000000000000000000000000000..2620ba33481a51cb8a0f62d70d78f451afa87466 --- /dev/null +++ b/binaries/cli/src/graph/mermaid-template.html @@ -0,0 +1,17 @@ + + + + + + + + +
+ ____insert____ +
+ + + + + diff --git a/binaries/cli/src/graph/mod.rs b/binaries/cli/src/graph/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..c28f9eb40f74cded6a37cff1ca9eec67b5f7c1e9 --- /dev/null +++ b/binaries/cli/src/graph/mod.rs @@ -0,0 +1,67 @@ +use std::{fs::File, io::Write, path::Path}; + +use dora_core::descriptor::Descriptor; +use eyre::Context; + +const MERMAID_TEMPLATE: &str = include_str!("mermaid-template.html"); + +pub(crate) fn create(dataflow: std::path::PathBuf, mermaid: bool, open: bool) -> eyre::Result<()> { + if mermaid { + let visualized = visualize_as_mermaid(&dataflow)?; + println!("{visualized}"); + println!( + "Paste the above output on https://mermaid.live/ or in a \ + ```mermaid code block on GitHub to display it." + ); + } else { + let html = visualize_as_html(&dataflow)?; + + let working_dir = std::env::current_dir().wrap_err("failed to get current working dir")?; + let graph_filename = match dataflow.file_stem().and_then(|n| n.to_str()) { + Some(name) => format!("{name}-graph"), + None => "graph".into(), + }; + let mut extra = 0; + let path = loop { + let adjusted_file_name = if extra == 0 { + format!("{graph_filename}.html") + } else { + format!("{graph_filename}.{extra}.html") + }; + let path = working_dir.join(&adjusted_file_name); + if path.exists() { + extra += 1; + } else { + break path; + } + }; + + let mut file = File::create(&path).context("failed to create graph HTML file")?; + file.write_all(html.as_bytes())?; + + println!( + "View graph by opening the following in your browser:\n file://{}", + path.display() + ); + + if open { + webbrowser::open(path.as_os_str().to_str().unwrap())?; + } + } + Ok(()) +} + +pub fn visualize_as_html(dataflow: &Path) -> eyre::Result { + let mermaid = visualize_as_mermaid(dataflow)?; + Ok(MERMAID_TEMPLATE.replacen("____insert____", &mermaid, 1)) +} + +pub fn visualize_as_mermaid(dataflow: &Path) -> eyre::Result { + let descriptor = Descriptor::blocking_read(dataflow) + .with_context(|| format!("failed to read dataflow at `{}`", dataflow.display()))?; + let visualized = descriptor + .visualize_as_mermaid() + .context("failed to visualize descriptor")?; + + Ok(visualized) +} diff --git a/binaries/cli/src/logs.rs b/binaries/cli/src/logs.rs new file mode 100644 index 0000000000000000000000000000000000000000..a15f11b1ffe875ad9bb4c22a08a805d181a5fec5 --- /dev/null +++ b/binaries/cli/src/logs.rs @@ -0,0 +1,45 @@ +use communication_layer_request_reply::TcpRequestReplyConnection; +use dora_core::topics::{ControlRequest, ControlRequestReply}; +use eyre::{bail, Context, Result}; +use uuid::Uuid; + +use bat::{Input, PrettyPrinter}; + +pub fn logs( + session: &mut TcpRequestReplyConnection, + uuid: Option, + name: Option, + node: String, +) -> Result<()> { + let logs = { + let reply_raw = session + .request( + &serde_json::to_vec(&ControlRequest::Logs { + uuid, + name, + node: node.clone(), + }) + .wrap_err("")?, + ) + .wrap_err("failed to send Logs request message")?; + + let reply = serde_json::from_slice(&reply_raw).wrap_err("failed to parse reply")?; + match reply { + ControlRequestReply::Logs(logs) => logs, + other => bail!("unexpected reply to daemon logs: {other:?}"), + } + }; + + PrettyPrinter::new() + .header(false) + .grid(false) + .line_numbers(false) + .paging_mode(bat::PagingMode::QuitIfOneScreen) + .inputs(vec![Input::from_bytes(&logs) + .name("Logs") + .title(format!("Logs from {node}.").as_str())]) + .print() + .wrap_err("Something went wrong with viewing log file")?; + + Ok(()) +} diff --git 
a/binaries/cli/src/main.rs b/binaries/cli/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..099aed2e8b32e5ccf185c9900267dd461380b75d --- /dev/null +++ b/binaries/cli/src/main.rs @@ -0,0 +1,580 @@ +use attach::attach_dataflow; +use clap::Parser; +use communication_layer_request_reply::{RequestReplyLayer, TcpLayer, TcpRequestReplyConnection}; +use dora_coordinator::Event; +use dora_core::{ + descriptor::Descriptor, + topics::{ + ControlRequest, ControlRequestReply, DataflowId, DORA_COORDINATOR_PORT_CONTROL_DEFAULT, + DORA_COORDINATOR_PORT_DEFAULT, + }, +}; +use dora_daemon::Daemon; +#[cfg(feature = "tracing")] +use dora_tracing::set_up_tracing; +use duration_str::parse; +use eyre::{bail, Context}; +use std::net::SocketAddr; +use std::{ + net::{IpAddr, Ipv4Addr}, + path::PathBuf, + time::Duration, +}; +use tokio::runtime::Builder; +use uuid::Uuid; + +mod attach; +mod build; +mod check; +mod graph; +mod logs; +mod template; +mod up; + +const LOCALHOST: IpAddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); +const LISTEN_WILDCARD: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); + +#[derive(Debug, clap::Parser)] +#[clap(version)] +struct Args { + #[clap(subcommand)] + command: Command, +} + +/// dora-rs cli client +#[derive(Debug, clap::Subcommand)] +enum Command { + /// Check if the coordinator and the daemon is running. + Check { + /// Path to the dataflow descriptor file (enables additional checks) + #[clap(long, value_name = "PATH", value_hint = clap::ValueHint::FilePath)] + dataflow: Option, + /// Address of the dora coordinator + #[clap(long, value_name = "IP", default_value_t = LOCALHOST)] + coordinator_addr: IpAddr, + /// Port number of the coordinator control server + #[clap(long, value_name = "PORT", default_value_t = DORA_COORDINATOR_PORT_CONTROL_DEFAULT)] + coordinator_port: u16, + }, + /// Generate a visualization of the given graph using mermaid.js. Use --open to open browser. + Graph { + /// Path to the dataflow descriptor file + #[clap(value_name = "PATH", value_hint = clap::ValueHint::FilePath)] + dataflow: PathBuf, + /// Visualize the dataflow as a Mermaid diagram (instead of HTML) + #[clap(long, action)] + mermaid: bool, + /// Open the HTML visualization in the browser + #[clap(long, action)] + open: bool, + }, + /// Run build commands provided in the given dataflow. + Build { + /// Path to the dataflow descriptor file + #[clap(value_name = "PATH", value_hint = clap::ValueHint::FilePath)] + dataflow: PathBuf, + }, + /// Generate a new project, node or operator. Choose the language between Rust, Python, C or C++. + New { + #[clap(flatten)] + args: CommandNew, + #[clap(hide = true, long)] + internal_create_with_path_dependencies: bool, + }, + /// Spawn coordinator and daemon in local mode (with default config) + Up { + /// Use a custom configuration + #[clap(long, hide = true, value_name = "PATH", value_hint = clap::ValueHint::FilePath)] + config: Option, + }, + /// Destroy running coordinator and daemon. If some dataflows are still running, they will be stopped first. + Destroy { + /// Use a custom configuration + #[clap(long, hide = true)] + config: Option, + /// Address of the dora coordinator + #[clap(long, value_name = "IP", default_value_t = LOCALHOST)] + coordinator_addr: IpAddr, + /// Port number of the coordinator control server + #[clap(long, value_name = "PORT", default_value_t = DORA_COORDINATOR_PORT_CONTROL_DEFAULT)] + coordinator_port: u16, + }, + /// Start the given dataflow path. Attach a name to the running dataflow by using --name. 
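+ /// Use `--attach` to block until the dataflow finishes, and `--hot-reload` to reload
+ /// Python operators when their source files change.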
+ Start { + /// Path to the dataflow descriptor file + #[clap(value_name = "PATH", value_hint = clap::ValueHint::FilePath)] + dataflow: PathBuf, + /// Assign a name to the dataflow + #[clap(long)] + name: Option, + /// Address of the dora coordinator + #[clap(long, value_name = "IP", default_value_t = LOCALHOST)] + coordinator_addr: IpAddr, + /// Port number of the coordinator control server + #[clap(long, value_name = "PORT", default_value_t = DORA_COORDINATOR_PORT_CONTROL_DEFAULT)] + coordinator_port: u16, + /// Attach to the dataflow and wait for its completion + #[clap(long, action)] + attach: bool, + /// Enable hot reloading (Python only) + #[clap(long, action)] + hot_reload: bool, + }, + /// Stop the given dataflow UUID. If no id is provided, you will be able to choose between the running dataflows. + Stop { + /// UUID of the dataflow that should be stopped + uuid: Option, + /// Name of the dataflow that should be stopped + #[clap(long)] + name: Option, + /// Kill the dataflow if it doesn't stop after the given duration + #[clap(long, value_name = "DURATION")] + #[arg(value_parser = parse)] + grace_duration: Option, + /// Address of the dora coordinator + #[clap(long, value_name = "IP", default_value_t = LOCALHOST)] + coordinator_addr: IpAddr, + /// Port number of the coordinator control server + #[clap(long, value_name = "PORT", default_value_t = DORA_COORDINATOR_PORT_CONTROL_DEFAULT)] + coordinator_port: u16, + }, + /// List running dataflows. + List { + /// Address of the dora coordinator + #[clap(long, value_name = "IP", default_value_t = LOCALHOST)] + coordinator_addr: IpAddr, + /// Port number of the coordinator control server + #[clap(long, value_name = "PORT", default_value_t = DORA_COORDINATOR_PORT_CONTROL_DEFAULT)] + coordinator_port: u16, + }, + // Planned for future releases: + // Dashboard, + /// Show logs of a given dataflow and node. + #[command(allow_missing_positional = true)] + Logs { + /// Identifier of the dataflow + #[clap(value_name = "UUID_OR_NAME")] + dataflow: Option, + /// Show logs for the given node + #[clap(value_name = "NAME")] + node: String, + /// Address of the dora coordinator + #[clap(long, value_name = "IP", default_value_t = LOCALHOST)] + coordinator_addr: IpAddr, + /// Port number of the coordinator control server + #[clap(long, value_name = "PORT", default_value_t = DORA_COORDINATOR_PORT_CONTROL_DEFAULT)] + coordinator_port: u16, + }, + // Metrics, + // Stats, + // Get, + // Upgrade, + /// Run daemon + Daemon { + /// Unique identifier for the machine (required for distributed dataflows) + #[clap(long)] + machine_id: Option, + /// The IP address and port this daemon will bind to. 
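+    /// Defaults to all interfaces with an OS-assigned port (`0.0.0.0:0`), per the `default_value_t` below.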
+ #[clap(long, default_value_t = SocketAddr::new(LISTEN_WILDCARD, 0))] + addr: SocketAddr, + /// Address and port number of the dora coordinator + #[clap(long, default_value_t = SocketAddr::new(LOCALHOST, DORA_COORDINATOR_PORT_DEFAULT))] + coordinator_addr: SocketAddr, + #[clap(long, hide = true)] + run_dataflow: Option, + }, + /// Run runtime + Runtime, + /// Run coordinator + Coordinator { + /// Network interface to bind to for daemon communication + #[clap(long, default_value_t = LISTEN_WILDCARD)] + interface: IpAddr, + /// Port number to bind to for daemon communication + #[clap(long, default_value_t = DORA_COORDINATOR_PORT_DEFAULT)] + port: u16, + /// Network interface to bind to for control communication + #[clap(long, default_value_t = LISTEN_WILDCARD)] + control_interface: IpAddr, + /// Port number to bind to for control communication + #[clap(long, default_value_t = DORA_COORDINATOR_PORT_CONTROL_DEFAULT)] + control_port: u16, + }, +} + +#[derive(Debug, clap::Args)] +pub struct CommandNew { + /// The entity that should be created + #[clap(long, value_enum, default_value_t = Kind::Dataflow)] + kind: Kind, + /// The programming language that should be used + #[clap(long, value_enum, default_value_t = Lang::Rust)] + lang: Lang, + /// Desired name of the entity + name: String, + /// Where to create the entity + #[clap(hide = true)] + path: Option, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)] +enum Kind { + Dataflow, + Operator, + CustomNode, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)] +enum Lang { + Rust, + Python, + C, + Cxx, +} + +fn main() { + if let Err(err) = run() { + eprintln!("{err:#}"); + std::process::exit(1); + } +} + +fn run() -> eyre::Result<()> { + let args = Args::parse(); + + #[cfg(feature = "tracing")] + match args.command { + Command::Daemon { .. } => { + set_up_tracing("dora-daemon").context("failed to set up tracing subscriber")?; + } + Command::Runtime => { + // Do not set the runtime in the cli. + } + Command::Coordinator { .. } => { + set_up_tracing("dora-coordinator").context("failed to set up tracing subscriber")?; + } + _ => { + set_up_tracing("dora-cli").context("failed to set up tracing subscriber")?; + } + }; + + match args.command { + Command::Check { + dataflow, + coordinator_addr, + coordinator_port, + } => match dataflow { + Some(dataflow) => { + let working_dir = dataflow + .canonicalize() + .context("failed to canonicalize dataflow path")? + .parent() + .ok_or_else(|| eyre::eyre!("dataflow path has no parent dir"))? + .to_owned(); + Descriptor::blocking_read(&dataflow)?.check(&working_dir)?; + check::check_environment((coordinator_addr, coordinator_port).into())? 
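The `Check` branch above (like most branches in this file) builds the coordinator's control address with `(coordinator_addr, coordinator_port).into()`. This works because `std` implements `From<(IpAddr, u16)> for SocketAddr`; the following is a minimal, self-contained sketch of that conversion — the port value is illustrative only, not necessarily dora's real default:

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

fn main() {
    // Same conversion the CLI uses: an (IpAddr, u16) tuple into a SocketAddr.
    let coordinator_addr: IpAddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
    let coordinator_port: u16 = 6012; // illustrative value only
    let control_addr: SocketAddr = (coordinator_addr, coordinator_port).into();
    assert_eq!(control_addr.to_string(), "127.0.0.1:6012");
}
```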
+ } + None => check::check_environment((coordinator_addr, coordinator_port).into())?, + }, + Command::Graph { + dataflow, + mermaid, + open, + } => { + graph::create(dataflow, mermaid, open)?; + } + Command::Build { dataflow } => { + build::build(&dataflow)?; + } + Command::New { + args, + internal_create_with_path_dependencies, + } => template::create(args, internal_create_with_path_dependencies)?, + Command::Up { config } => { + up::up(config.as_deref())?; + } + Command::Logs { + dataflow, + node, + coordinator_addr, + coordinator_port, + } => { + let mut session = connect_to_coordinator((coordinator_addr, coordinator_port).into()) + .wrap_err("failed to connect to dora coordinator")?; + let uuids = query_running_dataflows(&mut *session) + .wrap_err("failed to query running dataflows")?; + if let Some(dataflow) = dataflow { + let uuid = Uuid::parse_str(&dataflow).ok(); + let name = if uuid.is_some() { None } else { Some(dataflow) }; + logs::logs(&mut *session, uuid, name, node)? + } else { + let uuid = match &uuids[..] { + [] => bail!("No dataflows are running"), + [uuid] => uuid.clone(), + _ => inquire::Select::new("Choose dataflow to show logs:", uuids).prompt()?, + }; + logs::logs(&mut *session, Some(uuid.uuid), None, node)? + } + } + Command::Start { + dataflow, + name, + coordinator_addr, + coordinator_port, + attach, + hot_reload, + } => { + let dataflow_descriptor = + Descriptor::blocking_read(&dataflow).wrap_err("Failed to read yaml dataflow")?; + let working_dir = dataflow + .canonicalize() + .context("failed to canonicalize dataflow path")? + .parent() + .ok_or_else(|| eyre::eyre!("dataflow path has no parent dir"))? + .to_owned(); + dataflow_descriptor + .check(&working_dir) + .wrap_err("Could not validate yaml")?; + + let mut session = connect_to_coordinator((coordinator_addr, coordinator_port).into()) + .wrap_err("failed to connect to dora coordinator")?; + let dataflow_id = start_dataflow( + dataflow_descriptor.clone(), + name, + working_dir, + &mut *session, + )?; + + if attach { + attach_dataflow( + dataflow_descriptor, + dataflow, + dataflow_id, + &mut *session, + hot_reload, + )? 
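The `Logs` branch above accepts a single positional argument that may be either a dataflow UUID or a dataflow name, and disambiguates by trying `Uuid::parse_str` first. A standalone sketch of that rule follows; the helper name is made up for illustration and is not part of dora's API:

```rust
use uuid::Uuid;

// Hypothetical helper mirroring the UUID-or-name rule used by `dora logs`:
// if the argument parses as a UUID it is treated as one, otherwise as a name.
fn parse_dataflow_arg(arg: &str) -> (Option<Uuid>, Option<String>) {
    match Uuid::parse_str(arg) {
        Ok(uuid) => (Some(uuid), None),
        Err(_) => (None, Some(arg.to_owned())),
    }
}

fn main() {
    let (uuid, name) = parse_dataflow_arg("not-a-uuid");
    assert!(uuid.is_none() && name.is_some());
}
```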
+ } + } + Command::List { + coordinator_addr, + coordinator_port, + } => match connect_to_coordinator((coordinator_addr, coordinator_port).into()) { + Ok(mut session) => list(&mut *session)?, + Err(_) => { + bail!("No dora coordinator seems to be running."); + } + }, + Command::Stop { + uuid, + name, + grace_duration, + coordinator_addr, + coordinator_port, + } => { + let mut session = connect_to_coordinator((coordinator_addr, coordinator_port).into()) + .wrap_err("could not connect to dora coordinator")?; + match (uuid, name) { + (Some(uuid), _) => stop_dataflow(uuid, grace_duration, &mut *session)?, + (None, Some(name)) => stop_dataflow_by_name(name, grace_duration, &mut *session)?, + (None, None) => stop_dataflow_interactive(grace_duration, &mut *session)?, + } + } + Command::Destroy { + config, + coordinator_addr, + coordinator_port, + } => up::destroy( + config.as_deref(), + (coordinator_addr, coordinator_port).into(), + )?, + Command::Coordinator { + interface, + port, + control_interface, + control_port, + } => { + let rt = Builder::new_multi_thread() + .enable_all() + .build() + .context("tokio runtime failed")?; + rt.block_on(async { + let bind = SocketAddr::new(interface, port); + let bind_control = SocketAddr::new(control_interface, control_port); + let (port, task) = + dora_coordinator::start(bind, bind_control, futures::stream::empty::()) + .await?; + println!("Listening for incoming daemon connection on {port}"); + task.await + }) + .context("failed to run dora-coordinator")? + } + Command::Daemon { + coordinator_addr, + addr, + machine_id, + run_dataflow, + } => { + let rt = Builder::new_multi_thread() + .enable_all() + .build() + .context("tokio runtime failed")?; + rt.block_on(async { + match run_dataflow { + Some(dataflow_path) => { + tracing::info!("Starting dataflow `{}`", dataflow_path.display()); + if coordinator_addr != SocketAddr::new(LOCALHOST, DORA_COORDINATOR_PORT_DEFAULT){ + tracing::info!( + "Not using coordinator addr {} as `run_dataflow` is for local dataflow only. Please use the `start` command for remote coordinator", + coordinator_addr + ); + } + + Daemon::run_dataflow(&dataflow_path).await + } + None => { + if coordinator_addr.ip() == LOCALHOST { + tracing::info!("Starting in local mode"); + } + Daemon::run(coordinator_addr, machine_id.unwrap_or_default(), addr).await + } + } + }) + .context("failed to run dora-daemon")? 
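Both the `Coordinator` and `Daemon` branches above follow the same pattern: the CLI itself stays synchronous and only constructs a multi-threaded Tokio runtime for these long-running commands, then `block_on`s the async entry point. A minimal sketch of that pattern, with a stand-in async body instead of the real coordinator/daemon futures:

```rust
use tokio::runtime::Builder;

fn main() -> eyre::Result<()> {
    // Build a runtime on demand instead of annotating main with #[tokio::main].
    let rt = Builder::new_multi_thread().enable_all().build()?;
    let value = rt.block_on(async {
        // Stand-in for `dora_coordinator::start(..)` or `Daemon::run(..)`.
        40 + 2
    });
    assert_eq!(value, 42);
    Ok(())
}
```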
+ } + Command::Runtime => dora_runtime::main().context("Failed to run dora-runtime")?, + }; + + Ok(()) +} + +fn start_dataflow( + dataflow: Descriptor, + name: Option, + local_working_dir: PathBuf, + session: &mut TcpRequestReplyConnection, +) -> Result { + let reply_raw = session + .request( + &serde_json::to_vec(&ControlRequest::Start { + dataflow, + name, + local_working_dir, + }) + .unwrap(), + ) + .wrap_err("failed to send start dataflow message")?; + + let result: ControlRequestReply = + serde_json::from_slice(&reply_raw).wrap_err("failed to parse reply")?; + match result { + ControlRequestReply::DataflowStarted { uuid } => { + eprintln!("{uuid}"); + Ok(uuid) + } + ControlRequestReply::Error(err) => bail!("{err}"), + other => bail!("unexpected start dataflow reply: {other:?}"), + } +} + +fn stop_dataflow_interactive( + grace_duration: Option, + session: &mut TcpRequestReplyConnection, +) -> eyre::Result<()> { + let uuids = query_running_dataflows(session).wrap_err("failed to query running dataflows")?; + if uuids.is_empty() { + eprintln!("No dataflows are running"); + } else { + let selection = inquire::Select::new("Choose dataflow to stop:", uuids).prompt()?; + stop_dataflow(selection.uuid, grace_duration, session)?; + } + + Ok(()) +} + +fn stop_dataflow( + uuid: Uuid, + grace_duration: Option, + session: &mut TcpRequestReplyConnection, +) -> Result<(), eyre::ErrReport> { + let reply_raw = session + .request( + &serde_json::to_vec(&ControlRequest::Stop { + dataflow_uuid: uuid, + grace_duration, + }) + .unwrap(), + ) + .wrap_err("failed to send dataflow stop message")?; + let result: ControlRequestReply = + serde_json::from_slice(&reply_raw).wrap_err("failed to parse reply")?; + match result { + ControlRequestReply::DataflowStopped { uuid: _, result } => result + .map_err(|err| eyre::eyre!(err)) + .wrap_err("dataflow failed"), + ControlRequestReply::Error(err) => bail!("{err}"), + other => bail!("unexpected stop dataflow reply: {other:?}"), + } +} + +fn stop_dataflow_by_name( + name: String, + grace_duration: Option, + session: &mut TcpRequestReplyConnection, +) -> Result<(), eyre::ErrReport> { + let reply_raw = session + .request( + &serde_json::to_vec(&ControlRequest::StopByName { + name, + grace_duration, + }) + .unwrap(), + ) + .wrap_err("failed to send dataflow stop_by_name message")?; + let result: ControlRequestReply = + serde_json::from_slice(&reply_raw).wrap_err("failed to parse reply")?; + match result { + ControlRequestReply::DataflowStopped { uuid: _, result } => result + .map_err(|err| eyre::eyre!(err)) + .wrap_err("dataflow failed"), + ControlRequestReply::Error(err) => bail!("{err}"), + other => bail!("unexpected stop dataflow reply: {other:?}"), + } +} + +fn list(session: &mut TcpRequestReplyConnection) -> Result<(), eyre::ErrReport> { + let ids = query_running_dataflows(session)?; + + if ids.is_empty() { + eprintln!("No dataflows are running"); + } else { + println!("Running dataflows:"); + for id in ids { + println!("- {id}"); + } + } + + Ok(()) +} + +fn query_running_dataflows( + session: &mut TcpRequestReplyConnection, +) -> Result, eyre::ErrReport> { + let reply_raw = session + .request(&serde_json::to_vec(&ControlRequest::List).unwrap()) + .wrap_err("failed to send list message")?; + let reply: ControlRequestReply = + serde_json::from_slice(&reply_raw).wrap_err("failed to parse reply")?; + let ids = match reply { + ControlRequestReply::DataflowList { dataflows } => dataflows, + ControlRequestReply::Error(err) => bail!("{err}"), + other => bail!("unexpected list 
dataflow reply: {other:?}"), + }; + + Ok(ids) +} + +fn connect_to_coordinator( + coordinator_addr: SocketAddr, +) -> std::io::Result> { + TcpLayer::new().connect(coordinator_addr) +} diff --git a/binaries/cli/src/template/c/dataflow-template.yml b/binaries/cli/src/template/c/dataflow-template.yml new file mode 100644 index 0000000000000000000000000000000000000000..f28ae8fcd0b6cf25e4a8bee2ee4a59a2d03d480b --- /dev/null +++ b/binaries/cli/src/template/c/dataflow-template.yml @@ -0,0 +1,24 @@ +nodes: + - id: op_1 + operator: + shared-library: build/op_1 + inputs: + foo: dora/timer/millis/100 + outputs: + - bar + - id: op_2 + operator: + shared-library: build/op_2 + inputs: + foo: dora/timer/secs/2 + outputs: + - bar + + - id: custom-node_1 + custom: + source: build/node_1 + inputs: + input-1: op_1/bar + input-2: op_2/bar + outputs: + - foo diff --git a/binaries/cli/src/template/c/mod.rs b/binaries/cli/src/template/c/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..21bbef016b8a60ebf3bb42a034d582e9105fc518 --- /dev/null +++ b/binaries/cli/src/template/c/mod.rs @@ -0,0 +1,124 @@ +use dora_node_api_c::HEADER_NODE_API; +use dora_operator_api_c::{HEADER_OPERATOR_API, HEADER_OPERATOR_TYPES}; +use eyre::{bail, Context}; +use std::{ + fs, + path::{Path, PathBuf}, +}; + +pub fn create(args: crate::CommandNew) -> eyre::Result<()> { + let crate::CommandNew { + kind, + lang: _, + name, + path, + } = args; + + match kind { + crate::Kind::Operator => create_operator(name, path), + crate::Kind::CustomNode => create_custom_node(name, path), + crate::Kind::Dataflow => create_dataflow(name, path), + } +} + +fn create_dataflow(name: String, path: Option) -> Result<(), eyre::ErrReport> { + const DATAFLOW_YML: &str = include_str!("dataflow-template.yml"); + + if name.contains('/') { + bail!("dataflow name must not contain `/` separators"); + } + if !name.is_ascii() { + bail!("dataflow name must be ASCII"); + } + + // create directories + let root = path.as_deref().unwrap_or_else(|| Path::new(&name)); + fs::create_dir(root) + .with_context(|| format!("failed to create directory `{}`", root.display()))?; + + let dataflow_yml = DATAFLOW_YML.replace("___name___", &name); + let dataflow_yml_path = root.join("dataflow.yml"); + fs::write(&dataflow_yml_path, dataflow_yml) + .with_context(|| format!("failed to write `{}`", dataflow_yml_path.display()))?; + + create_operator("op_1".into(), Some(root.join("op_1")))?; + create_operator("op_2".into(), Some(root.join("op_2")))?; + create_custom_node("node_1".into(), Some(root.join("node_1")))?; + + println!( + "Created new C dataflow at `{name}` at {}", + Path::new(".").join(root).display() + ); + + Ok(()) +} + +fn create_operator(name: String, path: Option) -> Result<(), eyre::ErrReport> { + const OPERATOR: &str = include_str!("operator/operator-template.c"); + + if name.contains('/') { + bail!("operator name must not contain `/` separators"); + } + if name.contains('-') { + bail!("operator name must not contain `-` separators"); + } + if !name.is_ascii() { + bail!("operator name must be ASCII"); + } + + // create directories + let root = path.as_deref().unwrap_or_else(|| Path::new(&name)); + fs::create_dir(root) + .with_context(|| format!("failed to create directory `{}`", root.display()))?; + + let operator_path = root.join("operator.c"); + fs::write(&operator_path, OPERATOR) + .with_context(|| format!("failed to write `{}`", operator_path.display()))?; + let header_api_path = root.join("operator_api.h"); + let header_type_path = 
root.join("operator_types.h"); + fs::write(&header_api_path, HEADER_OPERATOR_API) + .with_context(|| format!("failed to write `{}`", header_api_path.display()))?; + fs::write(&header_type_path, HEADER_OPERATOR_TYPES) + .with_context(|| format!("failed to write `{}`", header_type_path.display()))?; + + // TODO: Makefile? + + println!( + "Created new C operator `{name}` at {}", + Path::new(".").join(root).display() + ); + + Ok(()) +} + +fn create_custom_node(name: String, path: Option) -> Result<(), eyre::ErrReport> { + const NODE: &str = include_str!("node/node-template.c"); + + if name.contains('/') { + bail!("node name must not contain `/` separators"); + } + if !name.is_ascii() { + bail!("node name must be ASCII"); + } + + // create directories + let root = path.as_deref().unwrap_or_else(|| Path::new(&name)); + fs::create_dir(root) + .with_context(|| format!("failed to create directory `{}`", root.display()))?; + + let node_path = root.join("node.c"); + fs::write(&node_path, NODE) + .with_context(|| format!("failed to write `{}`", node_path.display()))?; + let header_path = root.join("node_api.h"); + fs::write(&header_path, HEADER_NODE_API) + .with_context(|| format!("failed to write `{}`", header_path.display()))?; + + // TODO: Makefile? + + println!( + "Created new C custom node `{name}` at {}", + Path::new(".").join(root).display() + ); + + Ok(()) +} diff --git a/binaries/cli/src/template/c/node/node-template.c b/binaries/cli/src/template/c/node/node-template.c new file mode 100644 index 0000000000000000000000000000000000000000..48f40918238f03837ebe4c34e14f9c40ab4b78d3 --- /dev/null +++ b/binaries/cli/src/template/c/node/node-template.c @@ -0,0 +1,53 @@ +#include +#include +#include +#include "node_api.h" + +// sleep +#ifdef _WIN32 +#include +#else +#include +#endif + +int main() +{ + void *dora_context = init_dora_context_from_env(); + if (dora_context == NULL) + { + fprintf(stderr, "failed to init dora context\n"); + return -1; + } + + while (1) + { + void *event = dora_next_event(dora_context); + if (event == NULL) + { + printf("[c node] ERROR: unexpected end of event\n"); + return -1; + } + + enum DoraEventType ty = read_dora_event_type(event); + if (ty == DoraEventType_Input) + { + char *id; + size_t id_len; + read_dora_input_id(event, &id, &id_len); + + char *data; + size_t data_len; + read_dora_input_data(event, &data, &data_len); + + char out_id[] = "foo"; + char out_data[] = "bar"; + dora_send_output(dora_context, out_id, strlen(out_id), out_data, strlen(out_data)); + + free_dora_event(event); // do not use `id` or `data` pointer after freeing + } + } + + free_dora_context(dora_context); + + return 0; +} diff --git a/binaries/cli/src/template/c/operator/operator-template.c b/binaries/cli/src/template/c/operator/operator-template.c new file mode 100644 index 0000000000000000000000000000000000000000..d8dde97946e2fd531505b1d27ee8d63a7d27df83 --- /dev/null +++ b/binaries/cli/src/template/c/operator/operator-template.c @@ -0,0 +1,64 @@ +#include "operator_api.h" +#include +#include +#include +#include + +DoraInitResult_t dora_init_operator(void) +{ + // allocate memory for storing context across function calls (optional) + void *context = malloc(10); + // TODO initialize context memory + + DoraInitResult_t result = {.operator_context = context}; + return result; +} + +DoraResult_t dora_drop_operator(void *operator_context) +{ + free(operator_context); + + DoraResult_t result = {}; + return result; +} + +OnEventResult_t dora_on_event( + RawEvent_t *event, + const SendOutput_t 
*send_output, + void *operator_context) +{ + if (event->input != NULL) + { + char id[event->input->id.len + 1]; + memcpy(id, event->input->id.ptr, event->input->id.len); + id[event->input->id.len] = 0; + + // example for matching on input name + if (strcmp(id, "foo") == 0) + { + char *out_id = "bar"; + char *out_id_heap = strdup(out_id); + + int data_alloc_size = 10; + void *out_data = malloc(data_alloc_size); + // TODO initialize out_data + + Output_t output = {.id = { + .ptr = (uint8_t *)out_id_heap, + .len = strlen(out_id_heap), + .cap = strlen(out_id_heap) + 1, + }, + .data = {.ptr = (uint8_t *)out_data, .len = strlen(out_data), .cap = data_alloc_size}}; + DoraResult_t res = (send_output->send_output.call)(send_output->send_output.env_ptr, output); + + OnEventResult_t result = {.result = res, .status = DORA_STATUS_CONTINUE}; + return result; + } + } + if (event->stop) + { + printf("C operator received stop event\n"); + } + OnEventResult_t result = {.status = DORA_STATUS_CONTINUE}; + return result; +} diff --git a/binaries/cli/src/template/cxx/dataflow-template.yml b/binaries/cli/src/template/cxx/dataflow-template.yml new file mode 100644 index 0000000000000000000000000000000000000000..9559eb88ac8e1db24697e39de64e0a6eb92bf4d1 --- /dev/null +++ b/binaries/cli/src/template/cxx/dataflow-template.yml @@ -0,0 +1,23 @@ +nodes: + - id: runtime-node_1 + operators: + - id: op_1 + shared-library: build/op_1 + inputs: + tick: dora/timer/millis/100 + outputs: + - some-output + - id: op_2 + shared-library: build/op_2 + inputs: + tick: dora/timer/secs/2 + outputs: + - some-output + + - id: custom-node_1 + custom: + source: build/node_1 + inputs: + tick: dora/timer/secs/1 + input-1: op_1/some-output + input-2: op_2/some-output diff --git a/binaries/cli/src/template/cxx/mod.rs b/binaries/cli/src/template/cxx/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..579f39e18caea889b61fa340398a4f1d91716743 --- /dev/null +++ b/binaries/cli/src/template/cxx/mod.rs @@ -0,0 +1,114 @@ +use eyre::{bail, Context}; +use std::{ + fs, + path::{Path, PathBuf}, +}; + +pub fn create(args: crate::CommandNew) -> eyre::Result<()> { + let crate::CommandNew { + kind, + lang: _, + name, + path, + } = args; + + match kind { + crate::Kind::Operator => create_operator(name, path), + crate::Kind::CustomNode => create_custom_node(name, path), + crate::Kind::Dataflow => create_dataflow(name, path), + } +} + +fn create_dataflow(name: String, path: Option) -> Result<(), eyre::ErrReport> { + const DATAFLOW_YML: &str = include_str!("dataflow-template.yml"); + + if name.contains('/') { + bail!("dataflow name must not contain `/` separators"); + } + if !name.is_ascii() { + bail!("dataflow name must be ASCII"); + } + + // create directories + let root = path.as_deref().unwrap_or_else(|| Path::new(&name)); + fs::create_dir(root) + .with_context(|| format!("failed to create directory `{}`", root.display()))?; + + let dataflow_yml = DATAFLOW_YML.replace("___name___", &name); + let dataflow_yml_path = root.join("dataflow.yml"); + fs::write(&dataflow_yml_path, dataflow_yml) + .with_context(|| format!("failed to write `{}`", dataflow_yml_path.display()))?; + + create_operator("op_1".into(), Some(root.join("op_1")))?; + create_operator("op_2".into(), Some(root.join("op_2")))?; + create_custom_node("node_1".into(), Some(root.join("node_1")))?; + + println!( + "Created new C++ dataflow at `{name}` at {}", + Path::new(".").join(root).display() + ); + + Ok(()) +} + +fn create_operator(name: String, path: Option) -> Result<(), 
eyre::ErrReport> { + const OPERATOR: &str = include_str!("operator-template.cc"); + const HEADER: &str = include_str!("operator-template.h"); + + if name.contains('/') { + bail!("operator name must not contain `/` separators"); + } + if !name.is_ascii() { + bail!("operator name must be ASCII"); + } + + // create directories + let root = path.as_deref().unwrap_or_else(|| Path::new(&name)); + fs::create_dir(root) + .with_context(|| format!("failed to create directory `{}`", root.display()))?; + + let operator_path = root.join("operator.cc"); + fs::write(&operator_path, OPERATOR) + .with_context(|| format!("failed to write `{}`", operator_path.display()))?; + let header_path = root.join("operator.h"); + fs::write(&header_path, HEADER) + .with_context(|| format!("failed to write `{}`", header_path.display()))?; + + // TODO: Makefile? + + println!( + "Created new C++ operator `{name}` at {}", + Path::new(".").join(root).display() + ); + + Ok(()) +} + +fn create_custom_node(name: String, path: Option) -> Result<(), eyre::ErrReport> { + const NODE: &str = include_str!("node-template.cc"); + + if name.contains('/') { + bail!("node name must not contain `/` separators"); + } + if !name.is_ascii() { + bail!("node name must be ASCII"); + } + + // create directories + let root = path.as_deref().unwrap_or_else(|| Path::new(&name)); + fs::create_dir(root) + .with_context(|| format!("failed to create directory `{}`", root.display()))?; + + let node_path = root.join("node.cc"); + fs::write(&node_path, NODE) + .with_context(|| format!("failed to write `{}`", node_path.display()))?; + + // TODO: Makefile? + + println!( + "Created new C++ custom node `{name}` at {}", + Path::new(".").join(root).display() + ); + + Ok(()) +} diff --git a/binaries/cli/src/template/cxx/node-template.cc b/binaries/cli/src/template/cxx/node-template.cc new file mode 100644 index 0000000000000000000000000000000000000000..731d48f8cc302b2bda2c7eb16911b73f2e7632e4 --- /dev/null +++ b/binaries/cli/src/template/cxx/node-template.cc @@ -0,0 +1,36 @@ +#include "dora-node-api.h" // adjust this path if necessary + +#include +#include + +int main() +{ + std::cout << "HELLO FROM C++" << std::endl; + unsigned char counter = 0; + + auto dora_node = init_dora_node(); + + while (1) + { + auto input = next_input(dora_node.inputs); + if (input.end_of_input) + { + break; + } + counter += 1; + + std::cout << "Received input " << std::string(input.id) << " (counter: " << (unsigned int)counter << ")" << std::endl; + + std::vector out_vec{counter}; + rust::Slice out_slice{out_vec.data(), out_vec.size()}; + auto result = send_output(dora_node.send_output, "counter", out_slice); + auto error = std::string(result.error); + if (!error.empty()) + { + std::cerr << "Error: " << error << std::endl; + return -1; + } + } + + return 0; +} diff --git a/binaries/cli/src/template/cxx/operator-template.cc b/binaries/cli/src/template/cxx/operator-template.cc new file mode 100644 index 0000000000000000000000000000000000000000..2b8121883cc0cf33c7921bc3359bd4698d4855fd --- /dev/null +++ b/binaries/cli/src/template/cxx/operator-template.cc @@ -0,0 +1,23 @@ +#include "operator.h" +#include +#include +#include "../build/dora-operator-api.h" + +Operator::Operator() {} + +std::unique_ptr new_operator() +{ + return std::make_unique(); +} + +DoraOnInputResult on_input(Operator &op, rust::Str id, rust::Slice data, OutputSender &output_sender) +{ + op.counter += 1; + std::cout << "Rust API operator received input `" << id.data() << "` with data `" << (unsigned int)data[0] << "` 
(internal counter: " << (unsigned int)op.counter << ")" << std::endl; + + std::vector out_vec{op.counter}; + rust::Slice out_slice{out_vec.data(), out_vec.size()}; + auto send_result = send_output(output_sender, rust::Str("status"), out_slice); + DoraOnInputResult result = {send_result.error, false}; + return result; +} diff --git a/binaries/cli/src/template/cxx/operator-template.h b/binaries/cli/src/template/cxx/operator-template.h new file mode 100644 index 0000000000000000000000000000000000000000..a5fe9da1e1725ae25986720dab760ae469fd6b3f --- /dev/null +++ b/binaries/cli/src/template/cxx/operator-template.h @@ -0,0 +1,16 @@ +#pragma once +#include +#include "../../../apis/c/operator/operator_api.h" + +class Operator +{ +public: + Operator(); + unsigned char counter; +}; + +#include "dora-operator-api.h" // adjust this path if necessary + +std::unique_ptr new_operator(); + +DoraOnInputResult on_input(Operator &op, rust::Str id, rust::Slice data, OutputSender &output_sender); diff --git a/binaries/cli/src/template/mod.rs b/binaries/cli/src/template/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..4e68cb5480539c7c6d40931cb941f060913be843 --- /dev/null +++ b/binaries/cli/src/template/mod.rs @@ -0,0 +1,13 @@ +mod c; +mod cxx; +mod python; +mod rust; + +pub fn create(args: crate::CommandNew, use_path_deps: bool) -> eyre::Result<()> { + match args.lang { + crate::Lang::Rust => rust::create(args, use_path_deps), + crate::Lang::Python => python::create(args), + crate::Lang::C => c::create(args), + crate::Lang::Cxx => cxx::create(args), + } +} diff --git a/binaries/cli/src/template/python/dataflow-template.yml b/binaries/cli/src/template/python/dataflow-template.yml new file mode 100644 index 0000000000000000000000000000000000000000..782d78a616d8338d5f4ef0d732f8b2bcb4b14c70 --- /dev/null +++ b/binaries/cli/src/template/python/dataflow-template.yml @@ -0,0 +1,23 @@ +nodes: + - id: op_1 + operator: + python: op_1/op_1.py + inputs: + tick: dora/timer/millis/100 + outputs: + - some-output + - id: op_2 + operator: + python: op_2/op_2.py + inputs: + tick: dora/timer/secs/2 + outputs: + - some-output + + - id: custom-node_1 + custom: + source: ./node_1/node_1.py + inputs: + tick: dora/timer/secs/1 + input-1: op_1/some-output + input-2: op_2/some-output diff --git a/binaries/cli/src/template/python/mod.rs b/binaries/cli/src/template/python/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..a475fd8569378cb42c0d16b20533a7d4be63b988 --- /dev/null +++ b/binaries/cli/src/template/python/mod.rs @@ -0,0 +1,97 @@ +use eyre::{bail, Context}; +use std::{ + fs, + path::{Path, PathBuf}, +}; + +pub fn create(args: crate::CommandNew) -> eyre::Result<()> { + let crate::CommandNew { + kind, + lang: _, + name, + path, + } = args; + + match kind { + crate::Kind::Operator => create_operator(name, path), + crate::Kind::CustomNode => create_custom_node(name, path), + crate::Kind::Dataflow => create_dataflow(name, path), + } +} + +fn create_operator(name: String, path: Option) -> Result<(), eyre::ErrReport> { + const OPERATOR_PY: &str = include_str!("operator/operator-template.py"); + + if name.contains('/') { + bail!("Operator name must not contain `/` separators"); + } + if name.contains('.') { + bail!("Operator name must not contain `.` to not be confused for an extension"); + } + // create directories + let root = path.as_deref().unwrap_or_else(|| Path::new(&name)); + fs::create_dir(root) + .with_context(|| format!("failed to create directory `{}`", root.display()))?; + + 
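All of the `template` modules in this diff embed their scaffolding files with `include_str!` and customise them by plain string substitution rather than a template engine. A tiny sketch of that approach, using a made-up inline template instead of the real embedded files (the real templates use the `___name___` placeholder, e.g. in the Rust `Cargo-template.toml`):

```rust
fn main() {
    // Stand-in for an `include_str!`-embedded template file.
    let template = "name = \"___name___\"\nversion = \"0.1.0\"\n";
    let rendered = template.replace("___name___", "my_dataflow");
    assert_eq!(rendered, "name = \"my_dataflow\"\nversion = \"0.1.0\"\n");
}
```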
let operator_path = root.join(format!("{name}.py")); + fs::write(&operator_path, OPERATOR_PY) + .with_context(|| format!("failed to write `{}`", operator_path.display()))?; + + println!( + "Created new Python operator `{name}` at {}", + Path::new(".").join(root).display() + ); + + Ok(()) +} +fn create_custom_node(name: String, path: Option) -> Result<(), eyre::ErrReport> { + const NODE_PY: &str = include_str!("node/node-template.py"); + + // create directories + let root = path.as_deref().unwrap_or_else(|| Path::new(&name)); + fs::create_dir(root) + .with_context(|| format!("failed to create directory `{}`", root.display()))?; + + let node_path = root.join(format!("{name}.py")); + fs::write(&node_path, NODE_PY) + .with_context(|| format!("failed to write `{}`", node_path.display()))?; + + println!( + "Created new Python node `{name}` at {}", + Path::new(".").join(root).display() + ); + + Ok(()) +} + +fn create_dataflow(name: String, path: Option) -> Result<(), eyre::ErrReport> { + const DATAFLOW_YML: &str = include_str!("dataflow-template.yml"); + + if name.contains('/') { + bail!("dataflow name must not contain `/` separators"); + } + if !name.is_ascii() { + bail!("dataflow name must be ASCII"); + } + + // create directories + let root = path.as_deref().unwrap_or_else(|| Path::new(&name)); + fs::create_dir(root) + .with_context(|| format!("failed to create directory `{}`", root.display()))?; + + let dataflow_yml = DATAFLOW_YML.replace("___name___", &name); + let dataflow_yml_path = root.join("dataflow.yml"); + fs::write(&dataflow_yml_path, dataflow_yml) + .with_context(|| format!("failed to write `{}`", dataflow_yml_path.display()))?; + + create_operator("op_1".into(), Some(root.join("op_1")))?; + create_operator("op_2".into(), Some(root.join("op_2")))?; + create_custom_node("node_1".into(), Some(root.join("node_1")))?; + + println!( + "Created new yaml dataflow `{name}` at {}", + Path::new(".").join(root).display() + ); + + Ok(()) +} diff --git a/binaries/cli/src/template/python/node/node-template.py b/binaries/cli/src/template/python/node/node-template.py new file mode 100644 index 0000000000000000000000000000000000000000..f29c0a9984be21cd31b0622dd8111c6ef07ef6a7 --- /dev/null +++ b/binaries/cli/src/template/python/node/node-template.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +from dora import Node + +node = Node() + +event = node.next() +if event["type"] == "INPUT": + print( + f"""Node received: + id: {event["id"]}, + value: {event["value"]}, + metadata: {event["metadata"]}""" + ) diff --git a/binaries/cli/src/template/python/operator/operator-template.py b/binaries/cli/src/template/python/operator/operator-template.py new file mode 100644 index 0000000000000000000000000000000000000000..a6713444ac009f14dcc8aedd60ebb585f38ccbc9 --- /dev/null +++ b/binaries/cli/src/template/python/operator/operator-template.py @@ -0,0 +1,45 @@ +from dora import DoraStatus + + +class Operator: + """ + Template docstring + """ + + def __init__(self): + """Called on initialisation""" + pass + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + """ + + Args: + dora_event: Event containing an `id`, `data` and `metadata`. 
+ send_output Callable[[str, bytes | pa.Array, Optional[dict]], None]: + Function for sending output to the dataflow: + - First argument is the `output_id` + - Second argument is the data as either bytes or `pa.Array` + - Third argument is dora metadata dict + e.g.: `send_output("bbox", pa.array([100], type=pa.uint8()), dora_event["metadata"])` + + Returns: + DoraStatus: + CONTINUE means that the operator will + keep listening for further inputs. + STOP means that the operator stop listening for inputs. + + """ + if dora_event["type"] == "INPUT": + print( + f"Received input {dora_event['id']}, with data: {dora_event['value']}" + ) + + return DoraStatus.CONTINUE + + def __del__(self): + """Called before being deleted""" + pass diff --git a/binaries/cli/src/template/rust/Cargo-template.toml b/binaries/cli/src/template/rust/Cargo-template.toml new file mode 100644 index 0000000000000000000000000000000000000000..d28ac4f88801934c3019ade93e1bcb9cef17c5b9 --- /dev/null +++ b/binaries/cli/src/template/rust/Cargo-template.toml @@ -0,0 +1,3 @@ +[workspace] +resolver = "2" +members = ["op_1", "op_2", "node_1"] diff --git a/binaries/cli/src/template/rust/dataflow-template.yml b/binaries/cli/src/template/rust/dataflow-template.yml new file mode 100644 index 0000000000000000000000000000000000000000..0f019fe400265bc607d83c2d29abe3c6a8405dca --- /dev/null +++ b/binaries/cli/src/template/rust/dataflow-template.yml @@ -0,0 +1,26 @@ +nodes: + - id: op_1 + operator: + build: cargo build -p op_1 + shared-library: target/debug/op_1 + inputs: + tick: dora/timer/millis/100 + outputs: + - some-output + - id: op_2 + operator: + build: cargo build -p op_2 + shared-library: target/debug/op_2 + inputs: + tick: dora/timer/secs/2 + outputs: + - some-output + + - id: custom-node_1 + custom: + build: cargo build -p node_1 + source: target/debug/node_1 + inputs: + tick: dora/timer/secs/1 + input-1: op_1/some-output + input-2: op_2/some-output diff --git a/binaries/cli/src/template/rust/mod.rs b/binaries/cli/src/template/rust/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..84e5f30e3206b07da785f8caa20aa901b008d496 --- /dev/null +++ b/binaries/cli/src/template/rust/mod.rs @@ -0,0 +1,163 @@ +use eyre::{bail, Context}; +use std::{ + fs, + path::{Path, PathBuf}, +}; + +const VERSION: &str = env!("CARGO_PKG_VERSION"); +pub fn create(args: crate::CommandNew, use_path_deps: bool) -> eyre::Result<()> { + let crate::CommandNew { + kind, + lang: _, + name, + path, + } = args; + + match kind { + crate::Kind::Operator => create_operator(name, path, use_path_deps), + crate::Kind::CustomNode => create_custom_node(name, path, use_path_deps), + crate::Kind::Dataflow => create_dataflow(name, path, use_path_deps), + } +} + +fn create_dataflow( + name: String, + path: Option, + use_path_deps: bool, +) -> Result<(), eyre::ErrReport> { + const DATAFLOW_YML: &str = include_str!("dataflow-template.yml"); + const WORKSPACE_CARGO_TOML: &str = include_str!("Cargo-template.toml"); + + if name.contains('/') { + bail!("dataflow name must not contain `/` separators"); + } + if !name.is_ascii() { + bail!("dataflow name must be ASCII"); + } + + // create directories + let root = path.as_deref().unwrap_or_else(|| Path::new(&name)); + fs::create_dir(root) + .with_context(|| format!("failed to create directory `{}`", root.display()))?; + + let dataflow_yml = DATAFLOW_YML.replace("___name___", &name); + let dataflow_yml_path = root.join("dataflow.yml"); + fs::write(&dataflow_yml_path, dataflow_yml) + .with_context(|| format!("failed 
to write `{}`", dataflow_yml_path.display()))?; + let cargo_toml = WORKSPACE_CARGO_TOML.replace("___name___", &name); + let cargo_toml_path = root.join("Cargo.toml"); + fs::write(&cargo_toml_path, cargo_toml) + .with_context(|| format!("failed to write `{}`", cargo_toml_path.display()))?; + + create_operator("op_1".into(), Some(root.join("op_1")), use_path_deps)?; + create_operator("op_2".into(), Some(root.join("op_2")), use_path_deps)?; + create_custom_node("node_1".into(), Some(root.join("node_1")), use_path_deps)?; + + println!( + "Created new Rust dataflow at `{name}` at {}", + Path::new(".").join(root).display() + ); + + Ok(()) +} + +fn create_operator( + name: String, + path: Option, + use_path_deps: bool, +) -> Result<(), eyre::ErrReport> { + const CARGO_TOML: &str = include_str!("operator/Cargo-template.toml"); + const LIB_RS: &str = include_str!("operator/lib-template.rs"); + + if name.contains('/') { + bail!("operator name must not contain `/` separators"); + } + if name.contains('-') { + bail!( + "operator name must not contain `-` separators as + it get replaced by `_` as a static library." + ); + } + if !name.is_ascii() { + bail!("operator name must be ASCII"); + } + + // create directories + let root = path.as_deref().unwrap_or_else(|| Path::new(&name)); + fs::create_dir(root) + .with_context(|| format!("failed to create directory `{}`", root.display()))?; + let src = root.join("src"); + fs::create_dir(&src) + .with_context(|| format!("failed to create directory `{}`", src.display()))?; + + let dep = if use_path_deps { + r#"dora-operator-api = { path = "../../apis/rust/operator" }"#.to_string() + } else { + format!(r#"dora-operator-api = "{VERSION}""#) + }; + let cargo_toml = CARGO_TOML + .replace("___name___", &name) + .replace("dora-operator-api = {}", &dep); + + let cargo_toml_path = root.join("Cargo.toml"); + fs::write(&cargo_toml_path, cargo_toml) + .with_context(|| format!("failed to write `{}`", cargo_toml_path.display()))?; + + let lib_rs_path = src.join("lib.rs"); + fs::write(&lib_rs_path, LIB_RS) + .with_context(|| format!("failed to write `{}`", lib_rs_path.display()))?; + + println!( + "Created new Rust operator `{name}` at {}", + Path::new(".").join(root).display() + ); + + Ok(()) +} + +fn create_custom_node( + name: String, + path: Option, + use_path_deps: bool, +) -> Result<(), eyre::ErrReport> { + const CARGO_TOML: &str = include_str!("node/Cargo-template.toml"); + const MAIN_RS: &str = include_str!("node/main-template.rs"); + + if name.contains('/') { + bail!("node name must not contain `/` separators"); + } + if !name.is_ascii() { + bail!("node name must be ASCII"); + } + + // create directories + let root = path.as_deref().unwrap_or_else(|| Path::new(&name)); + fs::create_dir(root) + .with_context(|| format!("failed to create directory `{}`", root.display()))?; + let src = root.join("src"); + fs::create_dir(&src) + .with_context(|| format!("failed to create directory `{}`", src.display()))?; + + let dep = if use_path_deps { + r#"dora-node-api = { path = "../../apis/rust/node" }"#.to_string() + } else { + format!(r#"dora-node-api = "{VERSION}""#) + }; + let cargo_toml = CARGO_TOML + .replace("___name___", &name) + .replace("dora-node-api = {}", &dep); + let cargo_toml_path = root.join("Cargo.toml"); + fs::write(&cargo_toml_path, cargo_toml) + .with_context(|| format!("failed to write `{}`", cargo_toml_path.display()))?; + + let main_rs_path = src.join("main.rs"); + fs::write(&main_rs_path, MAIN_RS) + .with_context(|| format!("failed to write `{}`", 
main_rs_path.display()))?; + + println!( + "Created new Rust custom node `{name}` at {}", + Path::new(".").join(root).display() + ); + + Ok(()) +} diff --git a/binaries/cli/src/template/rust/node/Cargo-template.toml b/binaries/cli/src/template/rust/node/Cargo-template.toml new file mode 100644 index 0000000000000000000000000000000000000000..fa46f49a0ec890c68e57e5f81a074fbe9ef51ce7 --- /dev/null +++ b/binaries/cli/src/template/rust/node/Cargo-template.toml @@ -0,0 +1,9 @@ +[package] +name = "___name___" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +dora-node-api = {} diff --git a/binaries/cli/src/template/rust/node/main-template.rs b/binaries/cli/src/template/rust/node/main-template.rs new file mode 100644 index 0000000000000000000000000000000000000000..659f706c2b20d8f3ea14977850e91bf2cee5d564 --- /dev/null +++ b/binaries/cli/src/template/rust/node/main-template.rs @@ -0,0 +1,21 @@ +use dora_node_api::{DoraNode, Event}; +use std::error::Error; + +fn main() -> Result<(), Box> { + let (mut node, mut events) = DoraNode::init_from_env()?; + + while let Some(event) = events.recv() { + match event { + Event::Input { + id, + metadata, + data: _, + } => match id.as_str() { + other => eprintln!("Received input `{other}`"), + }, + _ => {} + } + } + + Ok(()) +} diff --git a/binaries/cli/src/template/rust/operator/Cargo-template.toml b/binaries/cli/src/template/rust/operator/Cargo-template.toml new file mode 100644 index 0000000000000000000000000000000000000000..19ecbb9cdd01214764dbc5c6f0102223b2e32cb0 --- /dev/null +++ b/binaries/cli/src/template/rust/operator/Cargo-template.toml @@ -0,0 +1,12 @@ +[package] +name = "___name___" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[lib] +crate-type = ["cdylib"] + +[dependencies] +dora-operator-api = {} diff --git a/binaries/cli/src/template/rust/operator/lib-template.rs b/binaries/cli/src/template/rust/operator/lib-template.rs new file mode 100644 index 0000000000000000000000000000000000000000..b40be985429f29af71fe8bc8c853a10cfefa5864 --- /dev/null +++ b/binaries/cli/src/template/rust/operator/lib-template.rs @@ -0,0 +1,25 @@ +use dora_operator_api::{register_operator, DoraOperator, DoraOutputSender, DoraStatus, Event}; + +register_operator!(ExampleOperator); + +#[derive(Debug, Default)] +struct ExampleOperator { + example_field: u32, +} + +impl DoraOperator for ExampleOperator { + fn on_event( + &mut self, + event: &Event, + output_sender: &mut DoraOutputSender, + ) -> Result { + match event { + Event::Input { id, data } => match id { + other => eprintln!("Received input {other}"), + }, + _ => {} + } + + Ok(DoraStatus::Continue) + } +} diff --git a/binaries/cli/src/up.rs b/binaries/cli/src/up.rs new file mode 100644 index 0000000000000000000000000000000000000000..66b2bd20d859c7b76ac9f924673ba19a27f88548 --- /dev/null +++ b/binaries/cli/src/up.rs @@ -0,0 +1,104 @@ +use crate::{check::daemon_running, connect_to_coordinator, LOCALHOST}; +use dora_core::topics::{ControlRequest, DORA_COORDINATOR_PORT_CONTROL_DEFAULT}; +use eyre::Context; +use std::{fs, net::SocketAddr, path::Path, process::Command, time::Duration}; +#[derive(Debug, Default, serde::Serialize, serde::Deserialize)] +struct UpConfig {} + +pub(crate) fn up(config_path: Option<&Path>) -> eyre::Result<()> { + let UpConfig {} = parse_dora_config(config_path)?; + let coordinator_addr = (LOCALHOST, 
DORA_COORDINATOR_PORT_CONTROL_DEFAULT).into(); + let mut session = match connect_to_coordinator(coordinator_addr) { + Ok(session) => session, + Err(_) => { + start_coordinator().wrap_err("failed to start dora-coordinator")?; + + loop { + match connect_to_coordinator(coordinator_addr) { + Ok(session) => break session, + Err(_) => { + // sleep a bit until the coordinator accepts connections + std::thread::sleep(Duration::from_millis(50)); + } + } + } + } + }; + + if !daemon_running(&mut *session)? { + start_daemon().wrap_err("failed to start dora-daemon")?; + + // wait a bit until daemon is connected + let mut i = 0; + const WAIT_S: f32 = 0.1; + loop { + if daemon_running(&mut *session)? { + break; + } + i += 1; + if i > 20 { + eyre::bail!("daemon not connected after {}s", WAIT_S * i as f32); + } + std::thread::sleep(Duration::from_secs_f32(WAIT_S)); + } + } + + Ok(()) +} + +pub(crate) fn destroy( + config_path: Option<&Path>, + coordinator_addr: SocketAddr, +) -> Result<(), eyre::ErrReport> { + let UpConfig {} = parse_dora_config(config_path)?; + match connect_to_coordinator(coordinator_addr) { + Ok(mut session) => { + // send destroy command to dora-coordinator + session + .request(&serde_json::to_vec(&ControlRequest::Destroy).unwrap()) + .wrap_err("failed to send destroy message")?; + println!("Send destroy command to dora-coordinator"); + } + Err(_) => { + eprintln!("Could not connect to dora-coordinator"); + } + } + + Ok(()) +} + +fn parse_dora_config(config_path: Option<&Path>) -> Result { + let path = config_path.or_else(|| Some(Path::new("dora-config.yml")).filter(|p| p.exists())); + let config = match path { + Some(path) => { + let raw = fs::read_to_string(path) + .with_context(|| format!("failed to read `{}`", path.display()))?; + serde_yaml::from_str(&raw) + .with_context(|| format!("failed to parse `{}`", path.display()))? 
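`up` above has to cope with the coordinator and daemon not being reachable immediately after they are spawned, so it polls the connection with a short sleep and, for the daemon, gives up after a bounded number of attempts. A generic sketch of that wait-until-ready pattern; names and limits are illustrative only, not dora's API:

```rust
use std::time::Duration;

// Illustrative helper: poll `ready` up to `attempts` times, sleeping between polls.
fn wait_until(mut ready: impl FnMut() -> bool, attempts: u32, delay: Duration) -> Result<(), String> {
    for _ in 0..attempts {
        if ready() {
            return Ok(());
        }
        std::thread::sleep(delay);
    }
    Err(format!("not ready after {} attempts", attempts))
}

fn main() {
    let mut polls = 0;
    let result = wait_until(
        || {
            polls += 1;
            polls >= 3 // pretend the daemon shows up on the third poll
        },
        20,
        Duration::from_millis(1),
    );
    assert!(result.is_ok());
}
```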
+ } + None => Default::default(), + }; + Ok(config) +} + +fn start_coordinator() -> eyre::Result<()> { + let mut cmd = + Command::new(std::env::current_exe().wrap_err("failed to get current executable path")?); + cmd.arg("coordinator"); + cmd.spawn().wrap_err("failed to run `dora coordinator`")?; + + println!("started dora coordinator"); + + Ok(()) +} + +fn start_daemon() -> eyre::Result<()> { + let mut cmd = + Command::new(std::env::current_exe().wrap_err("failed to get current executable path")?); + cmd.arg("daemon"); + cmd.spawn().wrap_err("failed to run `dora daemon`")?; + + println!("started dora daemon"); + + Ok(()) +} diff --git a/binaries/coordinator/Cargo.toml b/binaries/coordinator/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..4c6b688773f8d01c747e2c127c3d6a5b349a16ab --- /dev/null +++ b/binaries/coordinator/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "dora-coordinator" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[features] +default = ["tracing"] +tracing = ["dep:dora-tracing"] + +[dependencies] +eyre = "0.6.7" +futures = "0.3.21" +tokio = { version = "1.24.2", features = ["full"] } +tokio-stream = { version = "0.1.8", features = ["io-util", "net"] } +uuid = { version = "1.2.1" } +dora-core = { workspace = true } +tracing = "0.1.36" +dora-tracing = { workspace = true, optional = true } +futures-concurrency = "7.1.0" +serde_json = "1.0.86" +names = "0.14.0" +ctrlc = "3.2.5" diff --git a/binaries/coordinator/README.md b/binaries/coordinator/README.md new file mode 100644 index 0000000000000000000000000000000000000000..09cec0420613ddfdc51c4affe484e997dfa0581e --- /dev/null +++ b/binaries/coordinator/README.md @@ -0,0 +1,26 @@ +# Coordinator + +Prototype for a process/library-based dora-rs implementation, instead of framework-based. The idea is that each operator is compiled as a separate executable. The `dora-coordinator` runtime is responsible for reading the dataflow descriptor file and launching the operators accordingly. The operators use a common library called `dora-api`, which implements the communication layer based on zenoh. + +This approach has the following advantages: + +- Less overhead + - No data transfer between a runtime and the operator + - The compiler can inline and optimize the full process +- More flexibility + - Operators can be sync or async + - They can decide how many threads and which execution model they use + - The OS ensures fair share of resources (e.g. CPU time) -> no need to cooperate with other operators + - Operators get all inputs immediately -> no need for input rules + - Keeping local state is easily possible +- Separate address spaces + - The operators are isolated from each other. + +There are drawbacks too, for example: + +- Less control + - Processes run independently -> need to cooperate with the runtime, e.g. 
on stop signals + - Operator migration is more difficult +- Operators are always isolated + - No way of using in-memory channels + - Local sockets and shared memory should be still possible diff --git a/binaries/coordinator/src/control.rs b/binaries/coordinator/src/control.rs new file mode 100644 index 0000000000000000000000000000000000000000..c8987a9d4c993633a1944bb760a4633ad78039de --- /dev/null +++ b/binaries/coordinator/src/control.rs @@ -0,0 +1,170 @@ +use crate::{ + tcp_utils::{tcp_receive, tcp_send}, + Event, +}; +use dora_core::topics::{ControlRequest, ControlRequestReply}; +use eyre::{eyre, Context}; +use futures::{ + future::{self, Either}, + stream::FuturesUnordered, + FutureExt, Stream, StreamExt, +}; +use futures_concurrency::future::Race; +use std::{io::ErrorKind, net::SocketAddr}; +use tokio::{ + net::{TcpListener, TcpStream}, + sync::{mpsc, oneshot}, + task::JoinHandle, +}; +use tokio_stream::wrappers::ReceiverStream; + +pub(crate) async fn control_events( + control_listen_addr: SocketAddr, + tasks: &FuturesUnordered>, +) -> eyre::Result> { + let (tx, rx) = mpsc::channel(10); + + let (finish_tx, mut finish_rx) = mpsc::channel(1); + tasks.push(tokio::spawn(listen(control_listen_addr, tx, finish_tx))); + tasks.push(tokio::spawn(async move { + while let Some(()) = finish_rx.recv().await {} + })); + + Ok(ReceiverStream::new(rx).map(Event::Control)) +} + +async fn listen( + control_listen_addr: SocketAddr, + tx: mpsc::Sender, + _finish_tx: mpsc::Sender<()>, +) { + let result = TcpListener::bind(control_listen_addr) + .await + .wrap_err("failed to listen for control messages"); + let incoming = match result { + Ok(incoming) => incoming, + Err(err) => { + let _ = tx.send(err.into()).await; + return; + } + }; + + loop { + let new_connection = incoming.accept().map(Either::Left); + let coordinator_stop = tx.closed().map(Either::Right); + let connection = match (new_connection, coordinator_stop).race().await { + future::Either::Left(connection) => connection, + future::Either::Right(()) => { + // coordinator was stopped + break; + } + }; + match connection.wrap_err("failed to connect") { + Ok((connection, _)) => { + let tx = tx.clone(); + tokio::spawn(handle_requests(connection, tx, _finish_tx.clone())); + } + Err(err) => { + if tx.blocking_send(err.into()).is_err() { + break; + } + } + } + } +} + +async fn handle_requests( + mut connection: TcpStream, + tx: mpsc::Sender, + _finish_tx: mpsc::Sender<()>, +) { + loop { + let next_request = tcp_receive(&mut connection).map(Either::Left); + let coordinator_stopped = tx.closed().map(Either::Right); + let raw = match (next_request, coordinator_stopped).race().await { + Either::Right(()) => break, + Either::Left(request) => match request { + Ok(message) => message, + Err(err) => match err.kind() { + ErrorKind::UnexpectedEof => { + tracing::trace!("Control connection closed"); + break; + } + err => { + let err = eyre!(err).wrap_err("failed to receive incoming message"); + tracing::error!("{err}"); + break; + } + }, + }, + }; + + let result = + match serde_json::from_slice(&raw).wrap_err("failed to deserialize incoming message") { + Ok(request) => handle_request(request, &tx).await, + Err(err) => Err(err), + }; + + let reply = result.unwrap_or_else(|err| ControlRequestReply::Error(format!("{err}"))); + let serialized = + match serde_json::to_vec(&reply).wrap_err("failed to serialize ControlRequestReply") { + Ok(s) => s, + Err(err) => { + tracing::error!("{err:?}"); + break; + } + }; + match tcp_send(&mut connection, &serialized).await { + 
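+            // A send failure here usually means the CLI already closed the control
+            // connection; EOF is treated as a normal disconnect below, anything else
+            // is logged before giving up on this connection.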
Ok(()) => {} + Err(err) => match err.kind() { + ErrorKind::UnexpectedEof => { + tracing::debug!("Control connection closed while trying to send reply"); + break; + } + err => { + let err = eyre!(err).wrap_err("failed to send reply"); + tracing::error!("{err}"); + break; + } + }, + } + + if matches!(reply, ControlRequestReply::CoordinatorStopped) { + break; + } + } +} + +async fn handle_request( + request: ControlRequest, + tx: &mpsc::Sender, +) -> eyre::Result { + let (reply_tx, reply_rx) = oneshot::channel(); + let event = ControlEvent::IncomingRequest { + request, + reply_sender: reply_tx, + }; + + if tx.send(event).await.is_err() { + return Ok(ControlRequestReply::CoordinatorStopped); + } + + reply_rx + .await + .unwrap_or(Ok(ControlRequestReply::CoordinatorStopped)) +} + +#[derive(Debug)] +pub enum ControlEvent { + IncomingRequest { + request: ControlRequest, + reply_sender: oneshot::Sender>, + }, + Error(eyre::Report), +} + +impl From for ControlEvent { + fn from(err: eyre::Report) -> Self { + ControlEvent::Error(err) + } +} diff --git a/binaries/coordinator/src/lib.rs b/binaries/coordinator/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..bfbe6f1029fb6a11b6266786768af84387707bb2 --- /dev/null +++ b/binaries/coordinator/src/lib.rs @@ -0,0 +1,994 @@ +use crate::{ + run::spawn_dataflow, + tcp_utils::{tcp_receive, tcp_send}, +}; +pub use control::ControlEvent; +use dora_core::{ + config::{NodeId, OperatorId}, + coordinator_messages::RegisterResult, + daemon_messages::{DaemonCoordinatorEvent, DaemonCoordinatorReply, Timestamped}, + descriptor::{Descriptor, ResolvedNode}, + message::uhlc::{self, HLC}, + topics::{ControlRequest, ControlRequestReply, DataflowId}, +}; +use eyre::{bail, eyre, ContextCompat, WrapErr}; +use futures::{stream::FuturesUnordered, Future, Stream, StreamExt}; +use futures_concurrency::stream::Merge; +use run::SpawnedDataflow; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + net::SocketAddr, + path::PathBuf, + sync::Arc, + time::{Duration, Instant}, +}; +use tokio::{net::TcpStream, sync::mpsc, task::JoinHandle}; +use tokio_stream::wrappers::{ReceiverStream, TcpListenerStream}; +use uuid::Uuid; + +mod control; +mod listener; +mod run; +mod tcp_utils; + +pub async fn start( + bind: SocketAddr, + bind_control: SocketAddr, + external_events: impl Stream + Unpin, +) -> Result<(u16, impl Future>), eyre::ErrReport> { + let listener = listener::create_listener(bind).await?; + let port = listener + .local_addr() + .wrap_err("failed to get local addr of listener")? + .port(); + let new_daemon_connections = TcpListenerStream::new(listener).map(|c| { + c.map(Event::NewDaemonConnection) + .wrap_err("failed to open connection") + .unwrap_or_else(Event::DaemonConnectError) + }); + + let mut tasks = FuturesUnordered::new(); + let control_events = control::control_events(bind_control, &tasks) + .await + .wrap_err("failed to create control events")?; + + // Setup ctrl-c handler + let ctrlc_events = set_up_ctrlc_handler()?; + + let events = ( + external_events, + new_daemon_connections, + control_events, + ctrlc_events, + ) + .merge(); + + let future = async move { + start_inner(events, &tasks).await?; + + tracing::debug!("coordinator main loop finished, waiting on spawned tasks"); + while let Some(join_result) = tasks.next().await { + if let Err(err) = join_result { + tracing::error!("task panicked: {err}"); + } + } + tracing::debug!("all spawned tasks finished, exiting.."); + Ok(()) + }; + Ok((port, future)) +} + +// Resolve the dataflow name. 
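+// A dataflow may be addressed either by UUID or by its optional human-readable name.
+// Name lookup prefers running dataflows and only falls back to archived ones;
+// ambiguous names are rejected so the caller must supply the UUID instead.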
+fn resolve_name( + name: String, + running_dataflows: &HashMap, + archived_dataflows: &HashMap, +) -> eyre::Result { + let uuids: Vec<_> = running_dataflows + .iter() + .filter(|(_, v)| v.name.as_deref() == Some(name.as_str())) + .map(|(k, _)| k) + .copied() + .collect(); + let archived_uuids: Vec<_> = archived_dataflows + .iter() + .filter(|(_, v)| v.name.as_deref() == Some(name.as_str())) + .map(|(k, _)| k) + .copied() + .collect(); + + if uuids.is_empty() { + if archived_uuids.is_empty() { + bail!("no dataflow with name `{name}`"); + } else if let [uuid] = archived_uuids.as_slice() { + Ok(*uuid) + } else { + // TODO: Index the archived dataflows in order to return logs based on the index. + bail!("multiple archived dataflows found with name `{name}`, Please provide the UUID instead."); + } + } else if let [uuid] = uuids.as_slice() { + Ok(*uuid) + } else { + bail!("multiple dataflows found with name `{name}`"); + } +} + +async fn start_inner( + events: impl Stream + Unpin, + tasks: &FuturesUnordered>, +) -> eyre::Result<()> { + let clock = Arc::new(HLC::default()); + + let (daemon_events_tx, daemon_events) = tokio::sync::mpsc::channel(2); + let mut daemon_events_tx = Some(daemon_events_tx); + let daemon_events = ReceiverStream::new(daemon_events); + + let daemon_heartbeat_interval = + tokio_stream::wrappers::IntervalStream::new(tokio::time::interval(Duration::from_secs(3))) + .map(|_| Event::DaemonHeartbeatInterval); + + // events that should be aborted on `dora destroy` + let (abortable_events, abort_handle) = + futures::stream::abortable((events, daemon_heartbeat_interval).merge()); + + let mut events = (abortable_events, daemon_events).merge(); + + let mut running_dataflows: HashMap = HashMap::new(); + let mut dataflow_results: HashMap>> = HashMap::new(); + let mut archived_dataflows: HashMap = HashMap::new(); + let mut daemon_connections: HashMap<_, DaemonConnection> = HashMap::new(); + + while let Some(event) = events.next().await { + if event.log() { + tracing::trace!("Handling event {event:?}"); + } + match event { + Event::NewDaemonConnection(connection) => { + connection.set_nodelay(true)?; + let events_tx = daemon_events_tx.clone(); + if let Some(events_tx) = events_tx { + let task = tokio::spawn(listener::handle_connection( + connection, + events_tx, + clock.clone(), + )); + tasks.push(task); + } else { + tracing::warn!( + "ignoring new daemon connection because events_tx was closed already" + ); + } + } + Event::DaemonConnectError(err) => { + tracing::warn!("{:?}", err.wrap_err("failed to connect to dora-daemon")); + } + Event::Daemon(event) => match event { + DaemonEvent::Register { + machine_id, + mut connection, + dora_version: daemon_version, + listen_port, + } => { + let coordinator_version: &&str = &env!("CARGO_PKG_VERSION"); + let version_check = if &daemon_version == coordinator_version { + Ok(()) + } else { + Err(format!( + "version mismatch: daemon v{daemon_version} is \ + not compatible with coordinator v{coordinator_version}" + )) + }; + let peer_ip = connection + .peer_addr() + .map(|addr| addr.ip()) + .map_err(|err| format!("failed to get peer addr of connection: {err}")); + let register_result = version_check.and(peer_ip); + + let reply: Timestamped = Timestamped { + inner: match ®ister_result { + Ok(_) => RegisterResult::Ok, + Err(err) => RegisterResult::Err(err.clone()), + }, + timestamp: clock.new_timestamp(), + }; + let send_result = tcp_send(&mut connection, &serde_json::to_vec(&reply)?).await; + match (register_result, send_result) { + (Ok(ip), Ok(())) 
=> { + let previous = daemon_connections.insert( + machine_id.clone(), + DaemonConnection { + stream: connection, + listen_socket: (ip, listen_port).into(), + last_heartbeat: Instant::now(), + }, + ); + if let Some(_previous) = previous { + tracing::info!( + "closing previous connection `{machine_id}` on new register" + ); + } + } + (Err(err), _) => { + tracing::warn!("failed to register daemon connection for machine `{machine_id}`: {err}"); + } + (Ok(_), Err(err)) => { + tracing::warn!("failed to confirm daemon connection for machine `{machine_id}`: {err}"); + } + } + } + }, + Event::Dataflow { uuid, event } => match event { + DataflowEvent::ReadyOnMachine { + machine_id, + success, + } => { + match running_dataflows.entry(uuid) { + std::collections::hash_map::Entry::Occupied(mut entry) => { + let dataflow = entry.get_mut(); + dataflow.pending_machines.remove(&machine_id); + dataflow.init_success &= success; + if dataflow.pending_machines.is_empty() { + let message = serde_json::to_vec(&Timestamped { + inner: DaemonCoordinatorEvent::AllNodesReady { + dataflow_id: uuid, + success: dataflow.init_success, + }, + timestamp: clock.new_timestamp(), + }) + .wrap_err("failed to serialize AllNodesReady message")?; + + // notify all machines that run parts of the dataflow + for machine_id in &dataflow.machines { + let Some(connection) = daemon_connections.get_mut(machine_id) + else { + tracing::warn!( + "no daemon connection found for machine `{machine_id}`" + ); + continue; + }; + tcp_send(&mut connection.stream, &message) + .await + .wrap_err_with(|| { + format!( + "failed to send AllNodesReady({uuid}) message \ + to machine {machine_id}" + ) + })?; + } + } + } + std::collections::hash_map::Entry::Vacant(_) => { + tracing::warn!("dataflow not running on ReadyOnMachine"); + } + } + } + DataflowEvent::DataflowFinishedOnMachine { machine_id, result } => { + match running_dataflows.entry(uuid) { + std::collections::hash_map::Entry::Occupied(mut entry) => { + // Archive finished dataflow + if archived_dataflows.get(&uuid).is_none() { + archived_dataflows + .insert(uuid, ArchivedDataflow::from(entry.get())); + } + entry.get_mut().machines.remove(&machine_id); + match &result { + Ok(()) => { + tracing::info!("dataflow `{uuid}` finished successfully on machine `{machine_id}`"); + } + Err(err) => { + tracing::error!("{err:?}"); + } + } + dataflow_results + .entry(uuid) + .or_default() + .insert(machine_id, result.map_err(|err| format!("{err:?}"))); + if entry.get_mut().machines.is_empty() { + let finished_dataflow = entry.remove(); + let reply = ControlRequestReply::DataflowStopped { + uuid, + result: dataflow_results + .get(&uuid) + .map(|r| dataflow_result(r, uuid)) + .unwrap_or(Ok(())), + }; + for sender in finished_dataflow.reply_senders { + let _ = sender.send(Ok(reply.clone())); + } + } + } + std::collections::hash_map::Entry::Vacant(_) => { + tracing::warn!("dataflow not running on DataflowFinishedOnMachine"); + } + } + } + }, + + Event::Control(event) => match event { + ControlEvent::IncomingRequest { + request, + reply_sender, + } => { + match request { + ControlRequest::Start { + dataflow, + name, + local_working_dir, + } => { + let name = name.or_else(|| names::Generator::default().next()); + + let inner = async { + if let Some(name) = name.as_deref() { + // check that name is unique + if running_dataflows + .values() + .any(|d: &RunningDataflow| d.name.as_deref() == Some(name)) + { + bail!("there is already a running dataflow with name `{name}`"); + } + } + let dataflow = start_dataflow( + 
dataflow, + local_working_dir, + name, + &mut daemon_connections, + &clock, + ) + .await?; + Ok(dataflow) + }; + let reply = inner.await.map(|dataflow| { + let uuid = dataflow.uuid; + running_dataflows.insert(uuid, dataflow); + ControlRequestReply::DataflowStarted { uuid } + }); + let _ = reply_sender.send(reply); + } + ControlRequest::Check { dataflow_uuid } => { + let status = match &running_dataflows.get(&dataflow_uuid) { + Some(_) => ControlRequestReply::DataflowStarted { + uuid: dataflow_uuid, + }, + None => ControlRequestReply::DataflowStopped { + uuid: dataflow_uuid, + result: dataflow_results + .get(&dataflow_uuid) + .map(|r| dataflow_result(r, dataflow_uuid)) + .unwrap_or(Ok(())), + }, + }; + let _ = reply_sender.send(Ok(status)); + } + ControlRequest::Reload { + dataflow_id, + node_id, + operator_id, + } => { + let reload = async { + reload_dataflow( + &running_dataflows, + dataflow_id, + node_id, + operator_id, + &mut daemon_connections, + clock.new_timestamp(), + ) + .await?; + Result::<_, eyre::Report>::Ok(()) + }; + let reply = + reload + .await + .map(|()| ControlRequestReply::DataflowReloaded { + uuid: dataflow_id, + }); + let _ = reply_sender.send(reply); + } + ControlRequest::Stop { + dataflow_uuid, + grace_duration, + } => { + stop_dataflow_by_uuid( + &mut running_dataflows, + &dataflow_results, + dataflow_uuid, + &mut daemon_connections, + reply_sender, + clock.new_timestamp(), + grace_duration, + ) + .await?; + } + ControlRequest::StopByName { + name, + grace_duration, + } => match resolve_name(name, &running_dataflows, &archived_dataflows) { + Ok(uuid) => { + stop_dataflow_by_uuid( + &mut running_dataflows, + &dataflow_results, + uuid, + &mut daemon_connections, + reply_sender, + clock.new_timestamp(), + grace_duration, + ) + .await? + } + Err(err) => { + let _ = reply_sender.send(Err(err)); + } + }, + ControlRequest::Logs { uuid, name, node } => { + let dataflow_uuid = if let Some(uuid) = uuid { + uuid + } else if let Some(name) = name { + resolve_name(name, &running_dataflows, &archived_dataflows)? 
+ } else { + bail!("No uuid") + }; + + let reply = retrieve_logs( + &running_dataflows, + &archived_dataflows, + dataflow_uuid, + node.into(), + &mut daemon_connections, + clock.new_timestamp(), + ) + .await + .map(ControlRequestReply::Logs); + let _ = reply_sender.send(reply); + } + ControlRequest::Destroy => { + tracing::info!("Received destroy command"); + + let reply = handle_destroy( + &running_dataflows, + &mut daemon_connections, + &abort_handle, + &mut daemon_events_tx, + &clock, + ) + .await + .map(|()| ControlRequestReply::DestroyOk); + let _ = reply_sender.send(reply); + } + ControlRequest::List => { + let mut dataflows: Vec<_> = running_dataflows.values().collect(); + dataflows.sort_by_key(|d| (&d.name, d.uuid)); + + let reply = Ok(ControlRequestReply::DataflowList { + dataflows: dataflows + .into_iter() + .map(|d| DataflowId { + uuid: d.uuid, + name: d.name.clone(), + }) + .collect(), + }); + let _ = reply_sender.send(reply); + } + ControlRequest::DaemonConnected => { + let running = !daemon_connections.is_empty(); + let _ = reply_sender + .send(Ok(ControlRequestReply::DaemonConnected(running))); + } + ControlRequest::ConnectedMachines => { + let reply = Ok(ControlRequestReply::ConnectedMachines( + daemon_connections.keys().cloned().collect(), + )); + let _ = reply_sender.send(reply); + } + } + } + ControlEvent::Error(err) => tracing::error!("{err:?}"), + }, + Event::DaemonHeartbeatInterval => { + let mut disconnected = BTreeSet::new(); + for (machine_id, connection) in &mut daemon_connections { + if connection.last_heartbeat.elapsed() > Duration::from_secs(15) { + tracing::warn!( + "no heartbeat message from machine `{machine_id}` since {:?}", + connection.last_heartbeat.elapsed() + ) + } + if connection.last_heartbeat.elapsed() > Duration::from_secs(30) { + disconnected.insert(machine_id.clone()); + continue; + } + let result: eyre::Result<()> = tokio::time::timeout( + Duration::from_millis(500), + send_heartbeat_message(&mut connection.stream, clock.new_timestamp()), + ) + .await + .wrap_err("timeout") + .and_then(|r| r) + .wrap_err_with(|| { + format!("failed to send heartbeat message to daemon at `{machine_id}`") + }); + if let Err(err) = result { + tracing::warn!("{err:?}"); + disconnected.insert(machine_id.clone()); + } + } + if !disconnected.is_empty() { + tracing::error!("Disconnecting daemons that failed watchdog: {disconnected:?}"); + for machine_id in disconnected { + daemon_connections.remove(&machine_id); + } + } + } + Event::CtrlC => { + tracing::info!("Destroying coordinator after receiving Ctrl-C signal"); + handle_destroy( + &running_dataflows, + &mut daemon_connections, + &abort_handle, + &mut daemon_events_tx, + &clock, + ) + .await?; + } + Event::DaemonHeartbeat { machine_id } => { + if let Some(connection) = daemon_connections.get_mut(&machine_id) { + connection.last_heartbeat = Instant::now(); + } + } + } + } + + tracing::info!("stopped"); + + Ok(()) +} + +async fn stop_dataflow_by_uuid( + running_dataflows: &mut HashMap, + dataflow_results: &HashMap>>, + dataflow_uuid: Uuid, + daemon_connections: &mut HashMap, + reply_sender: tokio::sync::oneshot::Sender>, + timestamp: uhlc::Timestamp, + grace_duration: Option, +) -> Result<(), eyre::ErrReport> { + let Some(dataflow) = running_dataflows.get_mut(&dataflow_uuid) else { + if let Some(result) = dataflow_results.get(&dataflow_uuid) { + let reply = ControlRequestReply::DataflowStopped { + uuid: dataflow_uuid, + result: dataflow_result(result, dataflow_uuid), + }; + let _ = reply_sender.send(Ok(reply)); + 
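+        // The dataflow already finished; its recorded result was sent as the reply
+        // above, so there is nothing left to stop.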
return Ok(()); + } + bail!("no known dataflow found with UUID `{dataflow_uuid}`") + }; + let stop = async { + stop_dataflow( + dataflow, + dataflow_uuid, + daemon_connections, + timestamp, + grace_duration, + ) + .await?; + Result::<_, eyre::Report>::Ok(()) + }; + match stop.await { + Ok(()) => { + dataflow.reply_senders.push(reply_sender); + } + Err(err) => { + let _ = reply_sender.send(Err(err)); + } + }; + Ok(()) +} + +fn format_error(machine: &str, err: &str) -> String { + let mut error = err + .lines() + .fold(format!("- machine `{machine}`:\n"), |mut output, line| { + output.push_str(" "); + output.push_str(line); + output.push('\n'); + output + }); + error.push('\n'); + error +} + +fn dataflow_result( + results: &BTreeMap>, + dataflow_uuid: Uuid, +) -> Result<(), String> { + let mut errors = Vec::new(); + for (machine, result) in results { + if let Err(err) = result { + errors.push(format_error(machine, err)); + } + } + + if errors.is_empty() { + Ok(()) + } else { + let mut formatted = format!("errors occurred in dataflow {dataflow_uuid}:\n"); + formatted.push_str(&errors.join("\n")); + Err(formatted) + } +} + +struct DaemonConnection { + stream: TcpStream, + listen_socket: SocketAddr, + last_heartbeat: Instant, +} + +async fn handle_destroy( + running_dataflows: &HashMap, + daemon_connections: &mut HashMap, + abortable_events: &futures::stream::AbortHandle, + daemon_events_tx: &mut Option>, + clock: &HLC, +) -> Result<(), eyre::ErrReport> { + abortable_events.abort(); + for (&uuid, dataflow) in running_dataflows { + stop_dataflow( + dataflow, + uuid, + daemon_connections, + clock.new_timestamp(), + None, + ) + .await?; + } + destroy_daemons(daemon_connections, clock.new_timestamp()).await?; + *daemon_events_tx = None; + Ok(()) +} + +async fn send_heartbeat_message( + connection: &mut TcpStream, + timestamp: uhlc::Timestamp, +) -> eyre::Result<()> { + let message = serde_json::to_vec(&Timestamped { + inner: DaemonCoordinatorEvent::Heartbeat, + timestamp, + }) + .unwrap(); + + tcp_send(connection, &message) + .await + .wrap_err("failed to send heartbeat message to daemon") +} + +struct RunningDataflow { + name: Option, + uuid: Uuid, + /// The IDs of the machines that the dataflow is running on. + machines: BTreeSet, + /// IDs of machines that are waiting until all nodes are started. 
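+    /// Entries are removed as `ReadyOnMachine` events arrive; once the set is empty,
+    /// an `AllNodesReady` message is broadcast to all machines of the dataflow.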
+ pending_machines: BTreeSet, + init_success: bool, + nodes: Vec, + + reply_senders: Vec>>, +} + +struct ArchivedDataflow { + name: Option, + nodes: Vec, +} + +impl From<&RunningDataflow> for ArchivedDataflow { + fn from(dataflow: &RunningDataflow) -> ArchivedDataflow { + ArchivedDataflow { + name: dataflow.name.clone(), + nodes: dataflow.nodes.clone(), + } + } +} + +impl PartialEq for RunningDataflow { + fn eq(&self, other: &Self) -> bool { + self.name == other.name && self.uuid == other.uuid && self.machines == other.machines + } +} + +impl Eq for RunningDataflow {} + +async fn stop_dataflow( + dataflow: &RunningDataflow, + uuid: Uuid, + daemon_connections: &mut HashMap, + timestamp: uhlc::Timestamp, + grace_duration: Option, +) -> eyre::Result<()> { + let message = serde_json::to_vec(&Timestamped { + inner: DaemonCoordinatorEvent::StopDataflow { + dataflow_id: uuid, + grace_duration, + }, + timestamp, + })?; + + for machine_id in &dataflow.machines { + let daemon_connection = daemon_connections + .get_mut(machine_id) + .wrap_err("no daemon connection")?; // TODO: take from dataflow spec + tcp_send(&mut daemon_connection.stream, &message) + .await + .wrap_err("failed to send stop message to daemon")?; + + // wait for reply + let reply_raw = tcp_receive(&mut daemon_connection.stream) + .await + .wrap_err("failed to receive stop reply from daemon")?; + match serde_json::from_slice(&reply_raw) + .wrap_err("failed to deserialize stop reply from daemon")? + { + DaemonCoordinatorReply::StopResult(result) => result + .map_err(|e| eyre!(e)) + .wrap_err("failed to stop dataflow")?, + other => bail!("unexpected reply after sending stop: {other:?}"), + } + } + tracing::info!("successfully send stop dataflow `{uuid}` to all daemons"); + + Ok(()) +} + +async fn reload_dataflow( + running_dataflows: &HashMap, + dataflow_id: Uuid, + node_id: NodeId, + operator_id: Option, + daemon_connections: &mut HashMap, + timestamp: uhlc::Timestamp, +) -> eyre::Result<()> { + let Some(dataflow) = running_dataflows.get(&dataflow_id) else { + bail!("No running dataflow found with UUID `{dataflow_id}`") + }; + let message = serde_json::to_vec(&Timestamped { + inner: DaemonCoordinatorEvent::ReloadDataflow { + dataflow_id, + node_id, + operator_id, + }, + timestamp, + })?; + + for machine_id in &dataflow.machines { + let daemon_connection = daemon_connections + .get_mut(machine_id) + .wrap_err("no daemon connection")?; // TODO: take from dataflow spec + tcp_send(&mut daemon_connection.stream, &message) + .await + .wrap_err("failed to send reload message to daemon")?; + + // wait for reply + let reply_raw = tcp_receive(&mut daemon_connection.stream) + .await + .wrap_err("failed to receive reload reply from daemon")?; + match serde_json::from_slice(&reply_raw) + .wrap_err("failed to deserialize reload reply from daemon")? 
+ { + DaemonCoordinatorReply::ReloadResult(result) => result + .map_err(|e| eyre!(e)) + .wrap_err("failed to reload dataflow")?, + other => bail!("unexpected reply after sending reload: {other:?}"), + } + } + tracing::info!("successfully reloaded dataflow `{dataflow_id}`"); + + Ok(()) +} + +async fn retrieve_logs( + running_dataflows: &HashMap, + archived_dataflows: &HashMap, + dataflow_id: Uuid, + node_id: NodeId, + daemon_connections: &mut HashMap, + timestamp: uhlc::Timestamp, +) -> eyre::Result> { + let nodes = if let Some(dataflow) = archived_dataflows.get(&dataflow_id) { + dataflow.nodes.clone() + } else if let Some(dataflow) = running_dataflows.get(&dataflow_id) { + dataflow.nodes.clone() + } else { + bail!("No dataflow found with UUID `{dataflow_id}`") + }; + + let message = serde_json::to_vec(&Timestamped { + inner: DaemonCoordinatorEvent::Logs { + dataflow_id, + node_id: node_id.clone(), + }, + timestamp, + })?; + + let machine_ids: Vec = nodes + .iter() + .filter(|node| node.id == node_id) + .map(|node| node.deploy.machine.clone()) + .collect(); + + let machine_id = if let [machine_id] = &machine_ids[..] { + machine_id + } else if machine_ids.is_empty() { + bail!("No machine contains {}/{}", dataflow_id, node_id) + } else { + bail!( + "More than one machine contains {}/{}. However, it should only be present on one.", + dataflow_id, + node_id + ) + }; + + let daemon_connection = daemon_connections + .get_mut(machine_id.as_str()) + .wrap_err("no daemon connection")?; + tcp_send(&mut daemon_connection.stream, &message) + .await + .wrap_err("failed to send logs message to daemon")?; + + // wait for reply + let reply_raw = tcp_receive(&mut daemon_connection.stream) + .await + .wrap_err("failed to retrieve logs reply from daemon")?; + let reply_logs = match serde_json::from_slice(&reply_raw) + .wrap_err("failed to deserialize logs reply from daemon")? + { + DaemonCoordinatorReply::Logs(logs) => logs, + other => bail!("unexpected reply after sending logs: {other:?}"), + }; + tracing::info!("successfully retrieved logs for `{dataflow_id}/{node_id}`"); + + reply_logs.map_err(|err| eyre!(err)) +} + +async fn start_dataflow( + dataflow: Descriptor, + working_dir: PathBuf, + name: Option, + daemon_connections: &mut HashMap, + clock: &HLC, +) -> eyre::Result { + let SpawnedDataflow { + uuid, + machines, + nodes, + } = spawn_dataflow(dataflow, working_dir, daemon_connections, clock).await?; + Ok(RunningDataflow { + uuid, + name, + pending_machines: if machines.len() > 1 { + machines.clone() + } else { + BTreeSet::new() + }, + init_success: true, + machines, + nodes, + reply_senders: Vec::new(), + }) +} + +async fn destroy_daemons( + daemon_connections: &mut HashMap, + timestamp: uhlc::Timestamp, +) -> eyre::Result<()> { + let message = serde_json::to_vec(&Timestamped { + inner: DaemonCoordinatorEvent::Destroy, + timestamp, + })?; + + for (machine_id, mut daemon_connection) in daemon_connections.drain() { + tcp_send(&mut daemon_connection.stream, &message) + .await + .wrap_err("failed to send destroy message to daemon")?; + + // wait for reply + let reply_raw = tcp_receive(&mut daemon_connection.stream) + .await + .wrap_err("failed to receive destroy reply from daemon")?; + match serde_json::from_slice(&reply_raw) + .wrap_err("failed to deserialize destroy reply from daemon")? + { + DaemonCoordinatorReply::DestroyResult { result, .. 
} => result + .map_err(|e| eyre!(e)) + .wrap_err("failed to destroy dataflow")?, + other => bail!("unexpected reply after sending `destroy`: {other:?}"), + } + + tracing::info!("successfully destroyed daemon `{machine_id}`"); + } + + Ok(()) +} + +#[derive(Debug)] +pub enum Event { + NewDaemonConnection(TcpStream), + DaemonConnectError(eyre::Report), + DaemonHeartbeat { machine_id: String }, + Dataflow { uuid: Uuid, event: DataflowEvent }, + Control(ControlEvent), + Daemon(DaemonEvent), + DaemonHeartbeatInterval, + CtrlC, +} + +impl Event { + /// Whether this event should be logged. + #[allow(clippy::match_like_matches_macro)] + pub fn log(&self) -> bool { + match self { + Event::DaemonHeartbeatInterval => false, + _ => true, + } + } +} + +#[derive(Debug)] +pub enum DataflowEvent { + DataflowFinishedOnMachine { + machine_id: String, + result: eyre::Result<()>, + }, + ReadyOnMachine { + machine_id: String, + success: bool, + }, +} + +#[derive(Debug)] +pub enum DaemonEvent { + Register { + dora_version: String, + machine_id: String, + connection: TcpStream, + listen_port: u16, + }, +} + +fn set_up_ctrlc_handler() -> Result, eyre::ErrReport> { + let (ctrlc_tx, ctrlc_rx) = mpsc::channel(1); + + let mut ctrlc_sent = false; + ctrlc::set_handler(move || { + if ctrlc_sent { + tracing::warn!("received second ctrlc signal -> aborting immediately"); + std::process::abort(); + } else { + tracing::info!("received ctrlc signal"); + if ctrlc_tx.blocking_send(Event::CtrlC).is_err() { + tracing::error!("failed to report ctrl-c event to dora-coordinator"); + } + + ctrlc_sent = true; + } + }) + .wrap_err("failed to set ctrl-c handler")?; + + Ok(ReceiverStream::new(ctrlc_rx)) +} + +#[cfg(test)] +mod test { + #[test] + fn test_format_error() { + let machine = "machine A"; + let err = "foo\nbar\nbuzz"; + + // old method + let old_error = { + #[allow(clippy::format_collect)] + let err: String = err.lines().map(|line| format!(" {line}\n")).collect(); + format!("- machine `{machine}`:\n{err}\n") + }; + let new_error = super::format_error(machine, err); + assert_eq!(old_error, new_error) + } +} diff --git a/binaries/coordinator/src/listener.rs b/binaries/coordinator/src/listener.rs new file mode 100644 index 0000000000000000000000000000000000000000..86600a4bed06eb5db28956bdc6c9c9903a0a0472 --- /dev/null +++ b/binaries/coordinator/src/listener.rs @@ -0,0 +1,106 @@ +use crate::{tcp_utils::tcp_receive, DaemonEvent, DataflowEvent, Event}; +use dora_core::{coordinator_messages, daemon_messages::Timestamped, message::uhlc::HLC}; +use eyre::{eyre, Context}; +use std::{io::ErrorKind, net::SocketAddr, sync::Arc}; +use tokio::{ + net::{TcpListener, TcpStream}, + sync::mpsc, +}; + +pub async fn create_listener(bind: SocketAddr) -> eyre::Result { + let socket = match TcpListener::bind(bind).await { + Ok(socket) => socket, + Err(err) => { + return Err(eyre::Report::new(err).wrap_err("failed to create local TCP listener")) + } + }; + Ok(socket) +} + +pub async fn handle_connection( + mut connection: TcpStream, + events_tx: mpsc::Sender, + clock: Arc, +) { + loop { + // receive the next message and parse it + let raw = match tcp_receive(&mut connection).await { + Ok(data) => data, + Err(err) if err.kind() == ErrorKind::UnexpectedEof => { + break; + } + Err(err) => { + tracing::error!("{err:?}"); + continue; + } + }; + let message: Timestamped = + match serde_json::from_slice(&raw).wrap_err("failed to deserialize node message") { + Ok(e) => e, + Err(err) => { + tracing::warn!("{err:?}"); + continue; + } + }; + + if let Err(err) = 
clock.update_with_timestamp(&message.timestamp) { + tracing::warn!("failed to update coordinator clock: {err}"); + } + + // handle the message and translate it to a DaemonEvent + match message.inner { + coordinator_messages::CoordinatorRequest::Register { + machine_id, + dora_version, + listen_port, + } => { + let event = DaemonEvent::Register { + dora_version, + machine_id, + connection, + listen_port, + }; + let _ = events_tx.send(Event::Daemon(event)).await; + break; + } + coordinator_messages::CoordinatorRequest::Event { machine_id, event } => match event { + coordinator_messages::DaemonEvent::AllNodesReady { + dataflow_id, + success, + } => { + let event = Event::Dataflow { + uuid: dataflow_id, + event: DataflowEvent::ReadyOnMachine { + machine_id, + success, + }, + }; + if events_tx.send(event).await.is_err() { + break; + } + } + coordinator_messages::DaemonEvent::AllNodesFinished { + dataflow_id, + result, + } => { + let event = Event::Dataflow { + uuid: dataflow_id, + event: DataflowEvent::DataflowFinishedOnMachine { + machine_id, + result: result.map_err(|e| eyre!(e)), + }, + }; + if events_tx.send(event).await.is_err() { + break; + } + } + coordinator_messages::DaemonEvent::Heartbeat => { + let event = Event::DaemonHeartbeat { machine_id }; + if events_tx.send(event).await.is_err() { + break; + } + } + }, + }; + } +} diff --git a/binaries/coordinator/src/run/mod.rs b/binaries/coordinator/src/run/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..f37613581caa26ec9f74bfd9fe70945ae333f122 --- /dev/null +++ b/binaries/coordinator/src/run/mod.rs @@ -0,0 +1,100 @@ +use crate::{ + tcp_utils::{tcp_receive, tcp_send}, + DaemonConnection, +}; + +use dora_core::{ + daemon_messages::{ + DaemonCoordinatorEvent, DaemonCoordinatorReply, SpawnDataflowNodes, Timestamped, + }, + descriptor::{Descriptor, ResolvedNode}, + message::uhlc::HLC, +}; +use eyre::{bail, eyre, ContextCompat, WrapErr}; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + path::PathBuf, +}; +use uuid::{NoContext, Timestamp, Uuid}; + +#[tracing::instrument(skip(daemon_connections, clock))] +pub(super) async fn spawn_dataflow( + dataflow: Descriptor, + working_dir: PathBuf, + daemon_connections: &mut HashMap, + clock: &HLC, +) -> eyre::Result { + dataflow.check(&working_dir)?; + + let nodes = dataflow.resolve_aliases_and_set_defaults()?; + let uuid = Uuid::new_v7(Timestamp::now(NoContext)); + + let machines: BTreeSet<_> = nodes.iter().map(|n| n.deploy.machine.clone()).collect(); + let machine_listen_ports = machines + .iter() + .map(|m| { + daemon_connections + .get(m) + .ok_or_else(|| eyre!("no daemon listen port for machine `{m}`")) + .map(|c| (m.clone(), c.listen_socket)) + }) + .collect::, _>>()?; + + let spawn_command = SpawnDataflowNodes { + dataflow_id: uuid, + working_dir, + nodes: nodes.clone(), + machine_listen_ports, + dataflow_descriptor: dataflow, + }; + let message = serde_json::to_vec(&Timestamped { + inner: DaemonCoordinatorEvent::Spawn(spawn_command), + timestamp: clock.new_timestamp(), + })?; + + for machine in &machines { + tracing::trace!("Spawning dataflow `{uuid}` on machine `{machine}`"); + spawn_dataflow_on_machine(daemon_connections, machine, &message) + .await + .wrap_err_with(|| format!("failed to spawn dataflow on machine `{machine}`"))?; + } + + tracing::info!("successfully spawned dataflow `{uuid}`"); + + Ok(SpawnedDataflow { + uuid, + machines, + nodes, + }) +} + +async fn spawn_dataflow_on_machine( + daemon_connections: &mut HashMap, + machine: &str, + message: 
&[u8], +) -> Result<(), eyre::ErrReport> { + let daemon_connection = daemon_connections + .get_mut(machine) + .wrap_err_with(|| format!("no daemon connection for machine `{machine}`"))?; + tcp_send(&mut daemon_connection.stream, message) + .await + .wrap_err("failed to send spawn message to daemon")?; + let reply_raw = tcp_receive(&mut daemon_connection.stream) + .await + .wrap_err("failed to receive spawn reply from daemon")?; + match serde_json::from_slice(&reply_raw) + .wrap_err("failed to deserialize spawn reply from daemon")? + { + DaemonCoordinatorReply::SpawnResult(result) => result + .map_err(|e| eyre!(e)) + .wrap_err("daemon returned an error")?, + _ => bail!("unexpected reply"), + } + Ok(()) +} + +pub struct SpawnedDataflow { + pub uuid: Uuid, + pub machines: BTreeSet, + pub nodes: Vec, +} diff --git a/binaries/coordinator/src/tcp_utils.rs b/binaries/coordinator/src/tcp_utils.rs new file mode 100644 index 0000000000000000000000000000000000000000..57003f7bb16f12bbbd21b82e26155c81cd577ea0 --- /dev/null +++ b/binaries/coordinator/src/tcp_utils.rs @@ -0,0 +1,23 @@ +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, +}; + +pub async fn tcp_send(connection: &mut TcpStream, message: &[u8]) -> std::io::Result<()> { + let len_raw = (message.len() as u64).to_le_bytes(); + connection.write_all(&len_raw).await?; + connection.write_all(message).await?; + connection.flush().await?; + Ok(()) +} + +pub async fn tcp_receive(connection: &mut TcpStream) -> std::io::Result> { + let reply_len = { + let mut raw = [0; 8]; + connection.read_exact(&mut raw).await?; + u64::from_le_bytes(raw) as usize + }; + let mut reply = vec![0; reply_len]; + connection.read_exact(&mut reply).await?; + Ok(reply) +} diff --git a/binaries/daemon/Cargo.toml b/binaries/daemon/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..a53607f9eae40d88a5e85e908817f814ee058cca --- /dev/null +++ b/binaries/daemon/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "dora-daemon" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[features] +default = ["tracing", "telemetry"] +tracing = ["dep:dora-tracing"] +# telemetry flag enables to trace dora-daemon as well as send ticks with opentelemetry context +# for distributed tracing. 
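+# The feature is part of `default` above; build with `--no-default-features` to opt out.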
+telemetry = ["dep:tracing-opentelemetry"] + +[dependencies] +eyre = "0.6.8" +tokio = { version = "1.20.1", features = ["full"] } +tokio-stream = { version = "0.1.11", features = ["net"] } +tracing = "0.1.36" +tracing-opentelemetry = { version = "0.18.0", optional = true } +futures-concurrency = "7.1.0" +serde_json = "1.0.86" +dora-core = { workspace = true } +flume = "0.10.14" +dora-download = { workspace = true } +dora-tracing = { workspace = true, optional = true } +dora-arrow-convert = { workspace = true } +dora-node-api = { workspace = true } +serde_yaml = "0.8.23" +uuid = { version = "1.7", features = ["v7"] } +futures = "0.3.25" +shared-memory-server = { workspace = true } +bincode = "1.3.3" +async-trait = "0.1.64" +aligned-vec = "0.5.0" +ctrlc = "3.2.5" +which = "5.0.0" +sysinfo = "0.30.11" diff --git a/binaries/daemon/src/coordinator.rs b/binaries/daemon/src/coordinator.rs new file mode 100644 index 0000000000000000000000000000000000000000..d2f86b3cc99b97883e50b7d9310c4e559b3dab71 --- /dev/null +++ b/binaries/daemon/src/coordinator.rs @@ -0,0 +1,127 @@ +use crate::{ + tcp_utils::{tcp_receive, tcp_send}, + DaemonCoordinatorEvent, +}; +use dora_core::{ + coordinator_messages::{CoordinatorRequest, RegisterResult}, + daemon_messages::{DaemonCoordinatorReply, Timestamped}, + message::uhlc::HLC, +}; +use eyre::{eyre, Context}; +use std::{io::ErrorKind, net::SocketAddr}; +use tokio::{ + net::TcpStream, + sync::{mpsc, oneshot}, +}; +use tokio_stream::{wrappers::ReceiverStream, Stream}; + +#[derive(Debug)] +pub struct CoordinatorEvent { + pub event: DaemonCoordinatorEvent, + pub reply_tx: oneshot::Sender>, +} + +pub async fn register( + addr: SocketAddr, + machine_id: String, + listen_port: u16, + clock: &HLC, +) -> eyre::Result>> { + let mut stream = TcpStream::connect(addr) + .await + .wrap_err("failed to connect to dora-coordinator")?; + stream + .set_nodelay(true) + .wrap_err("failed to set TCP_NODELAY")?; + let register = serde_json::to_vec(&Timestamped { + inner: CoordinatorRequest::Register { + dora_version: env!("CARGO_PKG_VERSION").to_owned(), + machine_id, + listen_port, + }, + timestamp: clock.new_timestamp(), + })?; + tcp_send(&mut stream, ®ister) + .await + .wrap_err("failed to send register request to dora-coordinator")?; + let reply_raw = tcp_receive(&mut stream) + .await + .wrap_err("failed to register reply from dora-coordinator")?; + let result: Timestamped = serde_json::from_slice(&reply_raw) + .wrap_err("failed to deserialize dora-coordinator reply")?; + result.inner.to_result()?; + if let Err(err) = clock.update_with_timestamp(&result.timestamp) { + tracing::warn!("failed to update timestamp after register: {err}"); + } + + tracing::info!("Connected to dora-coordinator at {:?}", addr); + + let (tx, rx) = mpsc::channel(1); + tokio::spawn(async move { + loop { + let event = match tcp_receive(&mut stream).await { + Ok(raw) => match serde_json::from_slice(&raw) { + Ok(event) => event, + Err(err) => { + let err = + eyre!(err).wrap_err("failed to deserialize incoming coordinator event"); + tracing::warn!("{err:?}"); + continue; + } + }, + Err(err) if err.kind() == ErrorKind::UnexpectedEof => break, + Err(err) => { + let err = eyre!(err).wrap_err("failed to receive incoming event"); + tracing::warn!("{err:?}"); + continue; + } + }; + let Timestamped { + inner: event, + timestamp, + } = event; + let (reply_tx, reply_rx) = oneshot::channel(); + match tx + .send(Timestamped { + inner: CoordinatorEvent { event, reply_tx }, + timestamp, + }) + .await + { + Ok(()) => {} + Err(_) => 
{ + // receiving end of channel was closed + break; + } + } + + let Ok(reply) = reply_rx.await else { + tracing::warn!("daemon sent no reply"); + continue; + }; + if let Some(reply) = reply { + let serialized = match serde_json::to_vec(&reply) + .wrap_err("failed to serialize DaemonCoordinatorReply") + { + Ok(r) => r, + Err(err) => { + tracing::error!("{err:?}"); + continue; + } + }; + if let Err(err) = tcp_send(&mut stream, &serialized).await { + tracing::warn!("failed to send reply to coordinator: {err}"); + continue; + }; + if let DaemonCoordinatorReply::DestroyResult { notify, .. } = reply { + if let Some(notify) = notify { + let _ = notify.send(()); + } + break; + } + } + } + }); + + Ok(ReceiverStream::new(rx)) +} diff --git a/binaries/daemon/src/inter_daemon.rs b/binaries/daemon/src/inter_daemon.rs new file mode 100644 index 0000000000000000000000000000000000000000..7eb4b9485a8a5e4fbb44af3f8fc740a05b8cfa33 --- /dev/null +++ b/binaries/daemon/src/inter_daemon.rs @@ -0,0 +1,149 @@ +use crate::tcp_utils::{tcp_receive, tcp_send}; +use dora_core::daemon_messages::{InterDaemonEvent, Timestamped}; +use eyre::{Context, ContextCompat}; +use std::{collections::BTreeMap, io::ErrorKind, net::SocketAddr}; +use tokio::net::{TcpListener, TcpStream}; + +pub struct InterDaemonConnection { + socket: SocketAddr, + connection: Option, +} + +impl InterDaemonConnection { + pub fn new(socket: SocketAddr) -> Self { + Self { + socket, + connection: None, + } + } + + #[tracing::instrument(skip(self), fields(%self.socket))] + async fn connect(&mut self) -> eyre::Result<&mut TcpStream> { + match &mut self.connection { + Some(c) => Ok(c), + entry @ None => { + let connection = TcpStream::connect(self.socket) + .await + .wrap_err("failed to connect")?; + connection + .set_nodelay(true) + .wrap_err("failed to set nodelay")?; + Ok(entry.insert(connection)) + } + } + } + + pub fn socket(&self) -> SocketAddr { + self.socket + } +} + +#[tracing::instrument(skip(inter_daemon_connections))] +pub async fn send_inter_daemon_event( + target_machines: &[String], + inter_daemon_connections: &mut BTreeMap, + event: &Timestamped, +) -> eyre::Result<()> { + let message = bincode::serialize(event).wrap_err("failed to serialize InterDaemonEvent")?; + for target_machine in target_machines { + let connection = inter_daemon_connections + .get_mut(target_machine) + .wrap_err_with(|| format!("unknown target machine `{target_machine}`"))? + .connect() + .await + .wrap_err_with(|| format!("failed to connect to machine `{target_machine}`"))?; + tcp_send(connection, &message) + .await + .wrap_err_with(|| format!("failed to send event to machine `{target_machine}`"))?; + } + + Ok(()) +} + +pub async fn spawn_listener_loop( + bind: SocketAddr, + machine_id: String, + events_tx: flume::Sender>, +) -> eyre::Result { + let socket = match TcpListener::bind(bind).await { + Ok(socket) => socket, + Err(err) => { + return Err(eyre::Report::new(err).wrap_err("failed to create local TCP listener")) + } + }; + let listen_port = socket + .local_addr() + .wrap_err("failed to get local addr of socket")? 
+ .port(); + + tokio::spawn(async move { + listener_loop(socket, events_tx).await; + tracing::debug!("inter-daemon listener loop finished for machine `{machine_id}`"); + }); + + Ok(listen_port) +} + +async fn listener_loop( + listener: TcpListener, + events_tx: flume::Sender>, +) { + loop { + match listener + .accept() + .await + .wrap_err("failed to accept new connection") + { + Err(err) => { + tracing::info!("{err}"); + } + Ok((connection, _)) => { + tokio::spawn(handle_connection_loop(connection, events_tx.clone())); + } + } + } +} + +async fn handle_connection_loop( + mut connection: TcpStream, + events_tx: flume::Sender>, +) { + if let Err(err) = connection.set_nodelay(true) { + tracing::warn!("failed to set nodelay for connection: {err}"); + } + + loop { + match receive_message(&mut connection).await { + Ok(Some(message)) => { + if events_tx.send_async(message).await.is_err() { + break; + } + } + Ok(None) => break, + Err(err) => { + tracing::warn!("{err:?}"); + break; + } + } + } +} + +async fn receive_message( + connection: &mut TcpStream, +) -> eyre::Result>> { + let raw = match tcp_receive(connection).await { + Ok(raw) => raw, + Err(err) => match err.kind() { + ErrorKind::UnexpectedEof + | ErrorKind::ConnectionAborted + | ErrorKind::ConnectionReset => return Ok(None), + _other => { + return Err(err) + .context("unexpected I/O error while trying to receive InterDaemonEvent") + } + }, + }; + bincode::deserialize(&raw) + .wrap_err("failed to deserialize DaemonRequest") + .map(Some) +} diff --git a/binaries/daemon/src/lib.rs b/binaries/daemon/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..8b85ee8e67c22c7dcc74de1475de89ec1d53116a --- /dev/null +++ b/binaries/daemon/src/lib.rs @@ -0,0 +1,1656 @@ +use aligned_vec::{AVec, ConstAlign}; +use coordinator::CoordinatorEvent; +use dora_core::config::{Input, OperatorId}; +use dora_core::coordinator_messages::CoordinatorRequest; +use dora_core::daemon_messages::{DataMessage, InterDaemonEvent, Timestamped}; +use dora_core::message::uhlc::{self, HLC}; +use dora_core::message::{ArrowTypeInfo, Metadata, MetadataParameters}; +use dora_core::{ + config::{DataId, InputMapping, NodeId}, + coordinator_messages::DaemonEvent, + daemon_messages::{ + self, DaemonCoordinatorEvent, DaemonCoordinatorReply, DaemonReply, DataflowId, DropToken, + SpawnDataflowNodes, + }, + descriptor::{CoreNodeKind, Descriptor, ResolvedNode}, +}; + +use eyre::{bail, eyre, Context, ContextCompat}; +use futures::{future, stream, FutureExt, TryFutureExt}; +use futures_concurrency::stream::Merge; +use inter_daemon::InterDaemonConnection; +use pending::PendingNodes; +use shared_memory_server::ShmemConf; +use std::sync::Arc; +use std::time::Instant; +use std::{ + borrow::Cow, + collections::{BTreeMap, BTreeSet, HashMap}, + io, + net::SocketAddr, + path::{Path, PathBuf}, + time::Duration, +}; +use sysinfo::Pid; +use tcp_utils::tcp_send; +use tokio::fs::File; +use tokio::io::AsyncReadExt; +use tokio::net::TcpStream; +use tokio::sync::mpsc::UnboundedSender; +use tokio::sync::oneshot::Sender; +use tokio::sync::{mpsc, oneshot}; +use tokio_stream::{wrappers::ReceiverStream, Stream, StreamExt}; +use tracing::{error, warn}; +use uuid::{NoContext, Timestamp, Uuid}; + +mod coordinator; +mod inter_daemon; +mod log; +mod node_communication; +mod pending; +mod spawn; +mod tcp_utils; + +#[cfg(feature = "telemetry")] +use dora_tracing::telemetry::serialize_context; +#[cfg(feature = "telemetry")] +use tracing_opentelemetry::OpenTelemetrySpanExt; + +use 
crate::pending::DataflowStatus; + +pub struct Daemon { + running: HashMap, + working_dir: HashMap, + + events_tx: mpsc::Sender>, + + coordinator_connection: Option, + last_coordinator_heartbeat: Instant, + inter_daemon_connections: BTreeMap, + machine_id: String, + + /// used for testing and examples + exit_when_done: Option>, + /// used to record dataflow results when `exit_when_done` is used + dataflow_errors: BTreeMap>, + + clock: Arc, +} + +impl Daemon { + pub async fn run( + coordinator_addr: SocketAddr, + machine_id: String, + bind_addr: SocketAddr, + ) -> eyre::Result<()> { + let clock = Arc::new(HLC::default()); + + let ctrlc_events = set_up_ctrlc_handler(clock.clone())?; + + // spawn listen loop + let (events_tx, events_rx) = flume::bounded(10); + let listen_port = + inter_daemon::spawn_listener_loop(bind_addr, machine_id.clone(), events_tx).await?; + let daemon_events = events_rx.into_stream().map(|e| Timestamped { + inner: Event::Daemon(e.inner), + timestamp: e.timestamp, + }); + + // connect to the coordinator + let coordinator_events = + coordinator::register(coordinator_addr, machine_id.clone(), listen_port, &clock) + .await + .wrap_err("failed to connect to dora-coordinator")? + .map( + |Timestamped { + inner: event, + timestamp, + }| Timestamped { + inner: Event::Coordinator(event), + timestamp, + }, + ); + + Self::run_general( + (coordinator_events, ctrlc_events, daemon_events).merge(), + Some(coordinator_addr), + machine_id, + None, + clock, + ) + .await + .map(|_| ()) + } + + pub async fn run_dataflow(dataflow_path: &Path) -> eyre::Result<()> { + let working_dir = dataflow_path + .canonicalize() + .context("failed to canoncialize dataflow path")? + .parent() + .ok_or_else(|| eyre::eyre!("canonicalized dataflow path has no parent"))? 
+ .to_owned(); + + let descriptor = Descriptor::read(dataflow_path).await?; + descriptor.check(&working_dir)?; + let nodes = descriptor.resolve_aliases_and_set_defaults()?; + + let spawn_command = SpawnDataflowNodes { + dataflow_id: Uuid::new_v7(Timestamp::now(NoContext)), + working_dir, + nodes, + machine_listen_ports: BTreeMap::new(), + dataflow_descriptor: descriptor, + }; + + let clock = Arc::new(HLC::default()); + + let exit_when_done = spawn_command + .nodes + .iter() + .map(|n| (spawn_command.dataflow_id, n.id.clone())) + .collect(); + let (reply_tx, reply_rx) = oneshot::channel(); + let timestamp = clock.new_timestamp(); + let coordinator_events = stream::once(async move { + Timestamped { + inner: Event::Coordinator(CoordinatorEvent { + event: DaemonCoordinatorEvent::Spawn(spawn_command), + reply_tx, + }), + timestamp, + } + }); + let run_result = Self::run_general( + Box::pin(coordinator_events), + None, + "".to_string(), + Some(exit_when_done), + clock, + ); + + let spawn_result = reply_rx + .map_err(|err| eyre!("failed to receive spawn result: {err}")) + .and_then(|r| async { + match r { + Some(DaemonCoordinatorReply::SpawnResult(result)) => { + result.map_err(|err| eyre!(err)) + } + _ => Err(eyre!("unexpected spawn reply")), + } + }); + + let (dataflow_errors, ()) = future::try_join(run_result, spawn_result).await?; + + if dataflow_errors.is_empty() { + Ok(()) + } else { + let mut output = "some nodes failed:".to_owned(); + for (dataflow, node_errors) in dataflow_errors { + for (node, error) in node_errors { + use std::fmt::Write; + write!(&mut output, "\n - {dataflow}/{node}: {error}").unwrap(); + } + } + bail!("{output}"); + } + } + + async fn run_general( + external_events: impl Stream> + Unpin, + coordinator_addr: Option, + machine_id: String, + exit_when_done: Option>, + clock: Arc, + ) -> eyre::Result>> { + let coordinator_connection = match coordinator_addr { + Some(addr) => { + let stream = TcpStream::connect(addr) + .await + .wrap_err("failed to connect to dora-coordinator")?; + stream + .set_nodelay(true) + .wrap_err("failed to set TCP_NODELAY")?; + Some(stream) + } + None => None, + }; + + let (dora_events_tx, dora_events_rx) = mpsc::channel(5); + let daemon = Self { + running: HashMap::new(), + working_dir: HashMap::new(), + events_tx: dora_events_tx, + coordinator_connection, + last_coordinator_heartbeat: Instant::now(), + inter_daemon_connections: BTreeMap::new(), + machine_id, + exit_when_done, + dataflow_errors: BTreeMap::new(), + clock, + }; + + let dora_events = ReceiverStream::new(dora_events_rx); + let watchdog_clock = daemon.clock.clone(); + let watchdog_interval = tokio_stream::wrappers::IntervalStream::new(tokio::time::interval( + Duration::from_secs(5), + )) + .map(|_| Timestamped { + inner: Event::HeartbeatInterval, + timestamp: watchdog_clock.new_timestamp(), + }); + let events = (external_events, dora_events, watchdog_interval).merge(); + daemon.run_inner(events).await + } + + #[tracing::instrument(skip(incoming_events, self), fields(%self.machine_id))] + async fn run_inner( + mut self, + incoming_events: impl Stream> + Unpin, + ) -> eyre::Result>> { + let mut events = incoming_events; + + while let Some(event) = events.next().await { + let Timestamped { inner, timestamp } = event; + if let Err(err) = self.clock.update_with_timestamp(×tamp) { + tracing::warn!("failed to update HLC with incoming event timestamp: {err}"); + } + + match inner { + Event::Coordinator(CoordinatorEvent { event, reply_tx }) => { + let status = 
self.handle_coordinator_event(event, reply_tx).await?; + + match status { + RunStatus::Continue => {} + RunStatus::Exit => break, + } + } + Event::Daemon(event) => { + self.handle_inter_daemon_event(event).await?; + } + Event::Node { + dataflow_id: dataflow, + node_id, + event, + } => self.handle_node_event(event, dataflow, node_id).await?, + Event::Dora(event) => match self.handle_dora_event(event).await? { + RunStatus::Continue => {} + RunStatus::Exit => break, + }, + Event::HeartbeatInterval => { + if let Some(connection) = &mut self.coordinator_connection { + let msg = serde_json::to_vec(&Timestamped { + inner: CoordinatorRequest::Event { + machine_id: self.machine_id.clone(), + event: DaemonEvent::Heartbeat, + }, + timestamp: self.clock.new_timestamp(), + })?; + tcp_send(connection, &msg) + .await + .wrap_err("failed to send watchdog message to dora-coordinator")?; + + if self.last_coordinator_heartbeat.elapsed() > Duration::from_secs(20) { + bail!("lost connection to coordinator") + } + } + } + Event::CtrlC => { + for dataflow in self.running.values_mut() { + dataflow.stop_all(&self.clock, None).await; + } + } + } + } + + Ok(self.dataflow_errors) + } + + async fn handle_coordinator_event( + &mut self, + event: DaemonCoordinatorEvent, + reply_tx: Sender>, + ) -> eyre::Result { + let status = match event { + DaemonCoordinatorEvent::Spawn(SpawnDataflowNodes { + dataflow_id, + working_dir, + nodes, + machine_listen_ports, + dataflow_descriptor, + }) => { + match dataflow_descriptor.communication.remote { + dora_core::config::RemoteCommunicationConfig::Tcp => {} + } + for (machine_id, socket) in machine_listen_ports { + match self.inter_daemon_connections.entry(machine_id) { + std::collections::btree_map::Entry::Vacant(entry) => { + entry.insert(InterDaemonConnection::new(socket)); + } + std::collections::btree_map::Entry::Occupied(mut entry) => { + if entry.get().socket() != socket { + entry.insert(InterDaemonConnection::new(socket)); + } + } + } + } + + let result = self + .spawn_dataflow(dataflow_id, working_dir, nodes, dataflow_descriptor) + .await; + if let Err(err) = &result { + tracing::error!("{err:?}"); + } + let reply = + DaemonCoordinatorReply::SpawnResult(result.map_err(|err| format!("{err:?}"))); + let _ = reply_tx.send(Some(reply)).map_err(|_| { + error!("could not send `SpawnResult` reply from daemon to coordinator") + }); + RunStatus::Continue + } + DaemonCoordinatorEvent::AllNodesReady { + dataflow_id, + success, + } => { + match self.running.get_mut(&dataflow_id) { + Some(dataflow) => { + dataflow + .pending_nodes + .handle_external_all_nodes_ready(success) + .await?; + if success { + tracing::info!("coordinator reported that all nodes are ready, starting dataflow `{dataflow_id}`"); + dataflow.start(&self.events_tx, &self.clock).await?; + } + } + None => { + tracing::warn!( + "received AllNodesReady for unknown dataflow (ID `{dataflow_id}`)" + ); + } + } + let _ = reply_tx.send(None).map_err(|_| { + error!("could not send `AllNodesReady` reply from daemon to coordinator") + }); + RunStatus::Continue + } + DaemonCoordinatorEvent::Logs { + dataflow_id, + node_id, + } => { + match self.working_dir.get(&dataflow_id) { + Some(working_dir) => { + let working_dir = working_dir.clone(); + tokio::spawn(async move { + let logs = async { + let mut file = + File::open(log::log_path(&working_dir, &dataflow_id, &node_id)) + .await + .wrap_err(format!( + "Could not open log file: {:#?}", + log::log_path(&working_dir, &dataflow_id, &node_id) + ))?; + + let mut contents = vec![]; + 
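+                            // Read the whole log file into memory; the bytes are returned
+                            // to the coordinator in the `Logs` reply below.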
file.read_to_end(&mut contents) + .await + .wrap_err("Could not read content of log file")?; + Result::, eyre::Report>::Ok(contents) + } + .await + .map_err(|err| format!("{err:?}")); + let _ = reply_tx + .send(Some(DaemonCoordinatorReply::Logs(logs))) + .map_err(|_| { + error!("could not send logs reply from daemon to coordinator") + }); + }); + } + None => { + tracing::warn!("received Logs for unknown dataflow (ID `{dataflow_id}`)"); + let _ = reply_tx.send(None).map_err(|_| { + error!( + "could not send `AllNodesReady` reply from daemon to coordinator" + ) + }); + } + } + RunStatus::Continue + } + DaemonCoordinatorEvent::ReloadDataflow { + dataflow_id, + node_id, + operator_id, + } => { + let result = self.send_reload(dataflow_id, node_id, operator_id).await; + let reply = + DaemonCoordinatorReply::ReloadResult(result.map_err(|err| format!("{err:?}"))); + let _ = reply_tx + .send(Some(reply)) + .map_err(|_| error!("could not send reload reply from daemon to coordinator")); + RunStatus::Continue + } + DaemonCoordinatorEvent::StopDataflow { + dataflow_id, + grace_duration, + } => { + let dataflow = self + .running + .get_mut(&dataflow_id) + .wrap_err_with(|| format!("no running dataflow with ID `{dataflow_id}`"))?; + // .stop_all(&self.clock.clone(), grace_duration); + + let reply = DaemonCoordinatorReply::StopResult(Ok(())); + let _ = reply_tx + .send(Some(reply)) + .map_err(|_| error!("could not send stop reply from daemon to coordinator")); + + dataflow.stop_all(&self.clock, grace_duration).await; + RunStatus::Continue + } + DaemonCoordinatorEvent::Destroy => { + tracing::info!("received destroy command -> exiting"); + let (notify_tx, notify_rx) = oneshot::channel(); + let reply = DaemonCoordinatorReply::DestroyResult { + result: Ok(()), + notify: Some(notify_tx), + }; + let _ = reply_tx + .send(Some(reply)) + .map_err(|_| error!("could not send destroy reply from daemon to coordinator")); + // wait until the reply is sent out + if notify_rx.await.is_err() { + tracing::warn!("no confirmation received for DestroyReply"); + } + RunStatus::Exit + } + DaemonCoordinatorEvent::Heartbeat => { + self.last_coordinator_heartbeat = Instant::now(); + let _ = reply_tx.send(None); + RunStatus::Continue + } + }; + Ok(status) + } + + async fn handle_inter_daemon_event(&mut self, event: InterDaemonEvent) -> eyre::Result<()> { + match event { + InterDaemonEvent::Output { + dataflow_id, + node_id, + output_id, + metadata, + data, + } => { + let inner = async { + let dataflow = self.running.get_mut(&dataflow_id).wrap_err_with(|| { + format!("send out failed: no running dataflow with ID `{dataflow_id}`") + })?; + send_output_to_local_receivers( + node_id.clone(), + output_id.clone(), + dataflow, + &metadata, + data.map(DataMessage::Vec), + &self.clock, + ) + .await?; + Result::<_, eyre::Report>::Ok(()) + }; + if let Err(err) = inner + .await + .wrap_err("failed to forward remote output to local receivers") + { + tracing::warn!("{err:?}") + } + Ok(()) + } + InterDaemonEvent::InputsClosed { + dataflow_id, + inputs, + } => { + tracing::debug!(?dataflow_id, ?inputs, "received InputsClosed event"); + let inner = async { + let dataflow = self.running.get_mut(&dataflow_id).wrap_err_with(|| { + format!("send out failed: no running dataflow with ID `{dataflow_id}`") + })?; + for (receiver_id, input_id) in &inputs { + close_input(dataflow, receiver_id, input_id, &self.clock); + } + Result::<(), eyre::Report>::Ok(()) + }; + if let Err(err) = inner + .await + .wrap_err("failed to handle InputsClosed event sent by 
coordinator") + { + tracing::warn!("{err:?}") + } + Ok(()) + } + } + } + + async fn spawn_dataflow( + &mut self, + dataflow_id: uuid::Uuid, + working_dir: PathBuf, + nodes: Vec, + dataflow_descriptor: Descriptor, + ) -> eyre::Result<()> { + let dataflow = RunningDataflow::new(dataflow_id, self.machine_id.clone()); + let dataflow = match self.running.entry(dataflow_id) { + std::collections::hash_map::Entry::Vacant(entry) => { + self.working_dir.insert(dataflow_id, working_dir.clone()); + entry.insert(dataflow) + } + std::collections::hash_map::Entry::Occupied(_) => { + bail!("there is already a running dataflow with ID `{dataflow_id}`") + } + }; + + for node in nodes { + let local = node.deploy.machine == self.machine_id; + + let inputs = node_inputs(&node); + for (input_id, input) in inputs { + if local { + dataflow + .open_inputs + .entry(node.id.clone()) + .or_default() + .insert(input_id.clone()); + match input.mapping { + InputMapping::User(mapping) => { + dataflow + .mappings + .entry(OutputId(mapping.source, mapping.output)) + .or_default() + .insert((node.id.clone(), input_id)); + } + InputMapping::Timer { interval } => { + dataflow + .timers + .entry(interval) + .or_default() + .insert((node.id.clone(), input_id)); + } + } + } else if let InputMapping::User(mapping) = input.mapping { + dataflow + .open_external_mappings + .entry(OutputId(mapping.source, mapping.output)) + .or_default() + .entry(node.deploy.machine.clone()) + .or_default() + .insert((node.id.clone(), input_id)); + } + } + if local { + dataflow.pending_nodes.insert(node.id.clone()); + + let node_id = node.id.clone(); + match spawn::spawn_node( + dataflow_id, + &working_dir, + node, + self.events_tx.clone(), + dataflow_descriptor.clone(), + self.clock.clone(), + ) + .await + .wrap_err_with(|| format!("failed to spawn node `{node_id}`")) + { + Ok(pid) => { + dataflow + .running_nodes + .insert(node_id.clone(), RunningNode { pid }); + } + Err(err) => { + tracing::error!("{err:?}"); + dataflow + .pending_nodes + .handle_node_stop( + &node_id, + &mut self.coordinator_connection, + &self.clock, + ) + .await?; + } + } + } else { + dataflow.pending_nodes.set_external_nodes(true); + } + } + + Ok(()) + } + + async fn handle_node_event( + &mut self, + event: DaemonNodeEvent, + dataflow_id: DataflowId, + node_id: NodeId, + ) -> eyre::Result<()> { + match event { + DaemonNodeEvent::Subscribe { + event_sender, + reply_sender, + } => { + let dataflow = self.running.get_mut(&dataflow_id).ok_or_else(|| { + format!("subscribe failed: no running dataflow with ID `{dataflow_id}`") + }); + + match dataflow { + Err(err) => { + let _ = reply_sender.send(DaemonReply::Result(Err(err))); + } + Ok(dataflow) => { + tracing::debug!("node `{node_id}` is ready"); + Self::subscribe(dataflow, node_id.clone(), event_sender, &self.clock).await; + + let status = dataflow + .pending_nodes + .handle_node_subscription( + node_id.clone(), + reply_sender, + &mut self.coordinator_connection, + &self.clock, + ) + .await?; + match status { + DataflowStatus::AllNodesReady => { + tracing::info!( + "all nodes are ready, starting dataflow `{dataflow_id}`" + ); + dataflow.start(&self.events_tx, &self.clock).await?; + } + DataflowStatus::Pending => {} + } + } + } + } + DaemonNodeEvent::SubscribeDrop { + event_sender, + reply_sender, + } => { + let dataflow = self.running.get_mut(&dataflow_id).wrap_err_with(|| { + format!("failed to subscribe: no running dataflow with ID `{dataflow_id}`") + })?; + dataflow.drop_channels.insert(node_id, event_sender); + let _ = 
reply_sender.send(DaemonReply::Result(Ok(()))); + } + DaemonNodeEvent::CloseOutputs { + outputs, + reply_sender, + } => { + // notify downstream nodes + let inner = async { + let dataflow = self + .running + .get_mut(&dataflow_id) + .wrap_err_with(|| format!("failed to get downstream nodes: no running dataflow with ID `{dataflow_id}`"))?; + send_input_closed_events( + dataflow, + &mut self.inter_daemon_connections, + |OutputId(source_id, output_id)| { + source_id == &node_id && outputs.contains(output_id) + }, + &self.clock, + ) + .await + }; + + let reply = inner.await.map_err(|err| format!("{err:?}")); + let _ = reply_sender.send(DaemonReply::Result(reply)); + } + DaemonNodeEvent::OutputsDone { reply_sender } => { + let result = match self.running.get_mut(&dataflow_id) { + Some(dataflow) => { + Self::handle_outputs_done(dataflow, &mut self.inter_daemon_connections, &node_id, &self.clock) + .await + }, + None => Err(eyre!("failed to get downstream nodes: no running dataflow with ID `{dataflow_id}`")), + }; + + let _ = reply_sender.send(DaemonReply::Result( + result.map_err(|err| format!("{err:?}")), + )); + } + DaemonNodeEvent::SendOut { + output_id, + metadata, + data, + } => { + self.send_out(dataflow_id, node_id, output_id, metadata, data) + .await? + } + DaemonNodeEvent::ReportDrop { tokens } => { + let dataflow = self.running.get_mut(&dataflow_id).wrap_err_with(|| { + format!( + "failed to get handle drop tokens: \ + no running dataflow with ID `{dataflow_id}`" + ) + })?; + + for token in tokens { + match dataflow.pending_drop_tokens.get_mut(&token) { + Some(info) => { + if info.pending_nodes.remove(&node_id) { + dataflow.check_drop_token(token, &self.clock).await?; + } else { + tracing::warn!( + "node `{node_id}` is not pending for drop token `{token:?}`" + ); + } + } + None => tracing::warn!("unknown drop token `{token:?}`"), + } + } + } + DaemonNodeEvent::EventStreamDropped { reply_sender } => { + let inner = async { + let dataflow = self + .running + .get_mut(&dataflow_id) + .wrap_err_with(|| format!("no running dataflow with ID `{dataflow_id}`"))?; + dataflow.subscribe_channels.remove(&node_id); + Result::<_, eyre::Error>::Ok(()) + }; + + let reply = inner.await.map_err(|err| format!("{err:?}")); + let _ = reply_sender.send(DaemonReply::Result(reply)); + } + } + Ok(()) + } + + async fn send_reload( + &mut self, + dataflow_id: Uuid, + node_id: NodeId, + operator_id: Option, + ) -> Result<(), eyre::ErrReport> { + let dataflow = self.running.get_mut(&dataflow_id).wrap_err_with(|| { + format!("Reload failed: no running dataflow with ID `{dataflow_id}`") + })?; + if let Some(channel) = dataflow.subscribe_channels.get(&node_id) { + match send_with_timestamp( + channel, + daemon_messages::NodeEvent::Reload { operator_id }, + &self.clock, + ) { + Ok(()) => {} + Err(_) => { + dataflow.subscribe_channels.remove(&node_id); + } + } + } + Ok(()) + } + + async fn send_out( + &mut self, + dataflow_id: Uuid, + node_id: NodeId, + output_id: DataId, + metadata: dora_core::message::Metadata, + data: Option, + ) -> Result<(), eyre::ErrReport> { + let dataflow = self.running.get_mut(&dataflow_id).wrap_err_with(|| { + format!("send out failed: no running dataflow with ID `{dataflow_id}`") + })?; + let data_bytes = send_output_to_local_receivers( + node_id.clone(), + output_id.clone(), + dataflow, + &metadata, + data, + &self.clock, + ) + .await?; + + let output_id = OutputId(node_id, output_id); + let remote_receivers: Vec<_> = dataflow + .open_external_mappings + .get(&output_id) + .map(|m| 
m.keys().cloned().collect()) + .unwrap_or_default(); + if !remote_receivers.is_empty() { + let event = Timestamped { + inner: InterDaemonEvent::Output { + dataflow_id, + node_id: output_id.0, + output_id: output_id.1, + metadata, + data: data_bytes, + }, + timestamp: self.clock.new_timestamp(), + }; + inter_daemon::send_inter_daemon_event( + &remote_receivers, + &mut self.inter_daemon_connections, + &event, + ) + .await + .wrap_err("failed to forward output to remote receivers")?; + } + + Ok(()) + } + + async fn subscribe( + dataflow: &mut RunningDataflow, + node_id: NodeId, + event_sender: UnboundedSender>, + clock: &HLC, + ) { + // some inputs might have been closed already -> report those events + let closed_inputs = dataflow + .mappings + .values() + .flatten() + .filter(|(node, _)| node == &node_id) + .map(|(_, input)| input) + .filter(|input| { + dataflow + .open_inputs + .get(&node_id) + .map(|open_inputs| !open_inputs.contains(*input)) + .unwrap_or(true) + }); + for input_id in closed_inputs { + let _ = send_with_timestamp( + &event_sender, + daemon_messages::NodeEvent::InputClosed { + id: input_id.clone(), + }, + clock, + ); + } + if dataflow.open_inputs(&node_id).is_empty() { + let _ = send_with_timestamp( + &event_sender, + daemon_messages::NodeEvent::AllInputsClosed, + clock, + ); + } + + // if a stop event was already sent for the dataflow, send it to + // the newly connected node too + if dataflow.stop_sent { + let _ = send_with_timestamp(&event_sender, daemon_messages::NodeEvent::Stop, clock); + } + + dataflow.subscribe_channels.insert(node_id, event_sender); + } + + #[tracing::instrument(skip(dataflow, inter_daemon_connections, clock), fields(uuid = %dataflow.id), level = "trace")] + async fn handle_outputs_done( + dataflow: &mut RunningDataflow, + inter_daemon_connections: &mut BTreeMap, + node_id: &NodeId, + clock: &HLC, + ) -> eyre::Result<()> { + send_input_closed_events( + dataflow, + inter_daemon_connections, + |OutputId(source_id, _)| source_id == node_id, + clock, + ) + .await?; + dataflow.drop_channels.remove(node_id); + Ok(()) + } + + async fn handle_node_stop(&mut self, dataflow_id: Uuid, node_id: &NodeId) -> eyre::Result<()> { + let dataflow = self.running.get_mut(&dataflow_id).wrap_err_with(|| { + format!("failed to get downstream nodes: no running dataflow with ID `{dataflow_id}`") + })?; + + dataflow + .pending_nodes + .handle_node_stop(node_id, &mut self.coordinator_connection, &self.clock) + .await?; + + Self::handle_outputs_done( + dataflow, + &mut self.inter_daemon_connections, + node_id, + &self.clock, + ) + .await?; + + dataflow.running_nodes.remove(node_id); + if dataflow.running_nodes.is_empty() { + let result = match self.dataflow_errors.get(&dataflow.id) { + None => Ok(()), + Some(errors) => { + let mut output = "some nodes failed:".to_owned(); + for (node, error) in errors { + use std::fmt::Write; + write!(&mut output, "\n - {node}: {error}").unwrap(); + } + Err(output) + } + }; + tracing::info!( + "Dataflow `{dataflow_id}` finished on machine `{}`", + self.machine_id + ); + if let Some(connection) = &mut self.coordinator_connection { + let msg = serde_json::to_vec(&Timestamped { + inner: CoordinatorRequest::Event { + machine_id: self.machine_id.clone(), + event: DaemonEvent::AllNodesFinished { + dataflow_id, + result, + }, + }, + timestamp: self.clock.new_timestamp(), + })?; + tcp_send(connection, &msg) + .await + .wrap_err("failed to report dataflow finish to dora-coordinator")?; + } + self.running.remove(&dataflow_id); + } + Ok(()) + } + + async 
fn handle_dora_event(&mut self, event: DoraEvent) -> eyre::Result { + match event { + DoraEvent::Timer { + dataflow_id, + interval, + metadata, + } => { + let Some(dataflow) = self.running.get_mut(&dataflow_id) else { + tracing::warn!("Timer event for unknown dataflow `{dataflow_id}`"); + return Ok(RunStatus::Continue); + }; + + let Some(subscribers) = dataflow.timers.get(&interval) else { + return Ok(RunStatus::Continue); + }; + + let mut closed = Vec::new(); + for (receiver_id, input_id) in subscribers { + let Some(channel) = dataflow.subscribe_channels.get(receiver_id) else { + continue; + }; + + let send_result = send_with_timestamp( + channel, + daemon_messages::NodeEvent::Input { + id: input_id.clone(), + metadata: metadata.clone(), + data: None, + }, + &self.clock, + ); + match send_result { + Ok(()) => {} + Err(_) => { + closed.push(receiver_id); + } + } + } + for id in closed { + dataflow.subscribe_channels.remove(id); + } + } + DoraEvent::Logs { + dataflow_id, + output_id, + message, + metadata, + } => { + let Some(dataflow) = self.running.get_mut(&dataflow_id) else { + tracing::warn!("Logs event for unknown dataflow `{dataflow_id}`"); + return Ok(RunStatus::Continue); + }; + + let Some(subscribers) = dataflow.mappings.get(&output_id) else { + tracing::warn!( + "No subscribers found for {:?} in {:?}", + output_id, + dataflow.mappings + ); + return Ok(RunStatus::Continue); + }; + + let mut closed = Vec::new(); + for (receiver_id, input_id) in subscribers { + let Some(channel) = dataflow.subscribe_channels.get(receiver_id) else { + tracing::warn!("No subscriber channel found for {:?}", output_id); + continue; + }; + + let send_result = send_with_timestamp( + channel, + daemon_messages::NodeEvent::Input { + id: input_id.clone(), + metadata: metadata.clone(), + data: Some(message.clone()), + }, + &self.clock, + ); + match send_result { + Ok(()) => {} + Err(_) => { + closed.push(receiver_id); + } + } + } + for id in closed { + dataflow.subscribe_channels.remove(id); + } + } + DoraEvent::SpawnedNodeResult { + dataflow_id, + node_id, + exit_status, + } => { + let node_error = match exit_status { + NodeExitStatus::Success => { + tracing::info!("node {dataflow_id}/{node_id} finished successfully"); + None + } + NodeExitStatus::IoError(err) => { + let err = eyre!(err).wrap_err(format!( + " + I/O error while waiting for node `{dataflow_id}/{node_id}. + + Check logs using: dora logs {dataflow_id} {node_id} + " + )); + tracing::error!("{err:?}"); + Some(err) + } + NodeExitStatus::ExitCode(code) => { + let err = eyre!( + " + {dataflow_id}/{node_id} failed with exit code {code}. 
+ + Check logs using: dora logs {dataflow_id} {node_id} + " + ); + tracing::error!("{err}"); + Some(err) + } + NodeExitStatus::Signal(signal) => { + let signal: Cow<_> = match signal { + 1 => "SIGHUP".into(), + 2 => "SIGINT".into(), + 3 => "SIGQUIT".into(), + 4 => "SIGILL".into(), + 6 => "SIGABRT".into(), + 8 => "SIGFPE".into(), + 9 => "SIGKILL".into(), + 11 => "SIGSEGV".into(), + 13 => "SIGPIPE".into(), + 14 => "SIGALRM".into(), + 15 => "SIGTERM".into(), + 22 => "SIGABRT".into(), + 23 => "NSIG".into(), + + other => other.to_string().into(), + }; + let err = eyre!( + " + {dataflow_id}/{node_id} failed with signal `{signal}` + + Check logs using: dora logs {dataflow_id} {node_id} + " + ); + tracing::error!("{err}"); + Some(err) + } + NodeExitStatus::Unknown => { + let err = eyre!( + " + {dataflow_id}/{node_id} failed with unknown exit code + + Check logs using: dora logs {dataflow_id} {node_id} + " + ); + tracing::error!("{err}"); + Some(err) + } + }; + + if let Some(err) = node_error { + self.dataflow_errors + .entry(dataflow_id) + .or_default() + .insert(node_id.clone(), err); + } + + self.handle_node_stop(dataflow_id, &node_id).await?; + + if let Some(exit_when_done) = &mut self.exit_when_done { + exit_when_done.remove(&(dataflow_id, node_id)); + if exit_when_done.is_empty() { + tracing::info!( + "exiting daemon because all required dataflows are finished" + ); + return Ok(RunStatus::Exit); + } + } + } + } + Ok(RunStatus::Continue) + } +} + +async fn send_output_to_local_receivers( + node_id: NodeId, + output_id: DataId, + dataflow: &mut RunningDataflow, + metadata: &dora_core::message::Metadata, + data: Option, + clock: &HLC, +) -> Result>>, eyre::ErrReport> { + let timestamp = metadata.timestamp(); + let empty_set = BTreeSet::new(); + let output_id = OutputId(node_id, output_id); + let local_receivers = dataflow.mappings.get(&output_id).unwrap_or(&empty_set); + let OutputId(node_id, _) = output_id; + let mut closed = Vec::new(); + for (receiver_id, input_id) in local_receivers { + if let Some(channel) = dataflow.subscribe_channels.get(receiver_id) { + let item = daemon_messages::NodeEvent::Input { + id: input_id.clone(), + metadata: metadata.clone(), + data: data.clone(), + }; + match channel.send(Timestamped { + inner: item, + timestamp, + }) { + Ok(()) => { + if let Some(token) = data.as_ref().and_then(|d| d.drop_token()) { + dataflow + .pending_drop_tokens + .entry(token) + .or_insert_with(|| DropTokenInformation { + owner: node_id.clone(), + pending_nodes: Default::default(), + }) + .pending_nodes + .insert(receiver_id.clone()); + } + } + Err(_) => { + closed.push(receiver_id); + } + } + } + } + for id in closed { + dataflow.subscribe_channels.remove(id); + } + let (data_bytes, drop_token) = match data { + None => (None, None), + Some(DataMessage::SharedMemory { + shared_memory_id, + len, + drop_token, + }) => { + let memory = ShmemConf::new() + .os_id(shared_memory_id) + .open() + .wrap_err("failed to map shared memory output")?; + let data = Some(AVec::from_slice(1, &unsafe { memory.as_slice() }[..len])); + (data, Some(drop_token)) + } + Some(DataMessage::Vec(v)) => (Some(v), None), + }; + if let Some(token) = drop_token { + // insert token into `pending_drop_tokens` even if there are no local subscribers + dataflow + .pending_drop_tokens + .entry(token) + .or_insert_with(|| DropTokenInformation { + owner: node_id.clone(), + pending_nodes: Default::default(), + }); + // check if all local subscribers are finished with the token + dataflow.check_drop_token(token, clock).await?; + 
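The drop-token handling in `send_output_to_local_receivers` above follows a simple contract: every local receiver of a shared-memory sample is recorded under the sample's drop token, and the owning node is only told to reuse the memory once the last receiver has reported its drop. Below is a minimal, self-contained sketch of that bookkeeping using plain `std` collections; `DropTracker`, its method names, and the `u64`/`String` token and node types are illustrative stand-ins, not the daemon's actual types.

```rust
use std::collections::{BTreeSet, HashMap};

type DropToken = u64; // stand-in for the real drop token type
type NodeId = String; // stand-in for dora_core::config::NodeId

struct DropTokenInfo {
    owner: NodeId,
    pending_nodes: BTreeSet<NodeId>,
}

#[derive(Default)]
struct DropTracker {
    pending: HashMap<DropToken, DropTokenInfo>,
}

impl DropTracker {
    /// Record that `receiver` now holds a reference to the data behind `token`.
    fn register(&mut self, token: DropToken, owner: &NodeId, receiver: &NodeId) {
        self.pending
            .entry(token)
            .or_insert_with(|| DropTokenInfo {
                owner: owner.clone(),
                pending_nodes: BTreeSet::new(),
            })
            .pending_nodes
            .insert(receiver.clone());
    }

    /// Called when `receiver` reports that it dropped its copy of `token`.
    /// Returns the owner once no receiver holds the data anymore.
    fn report_drop(&mut self, token: DropToken, receiver: &NodeId) -> Option<NodeId> {
        let info = self.pending.get_mut(&token)?;
        info.pending_nodes.remove(receiver);
        if !info.pending_nodes.is_empty() {
            return None;
        }
        self.pending.remove(&token).map(|info| info.owner)
    }
}

fn main() {
    let mut tracker = DropTracker::default();
    let (owner, receiver) = ("camera".to_string(), "plot".to_string());
    tracker.register(1, &owner, &receiver);
    // Once the last receiver reports the drop, the owner can reuse the memory.
    assert_eq!(tracker.report_drop(1, &receiver), Some(owner));
}
```

The real implementation additionally inserts the token even when there are no local subscribers, so an output that only goes to remote daemons is still acknowledged back to its owner.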
} + Ok(data_bytes) +} + +fn node_inputs(node: &ResolvedNode) -> BTreeMap { + match &node.kind { + CoreNodeKind::Custom(n) => n.run_config.inputs.clone(), + CoreNodeKind::Runtime(n) => runtime_node_inputs(n), + } +} + +fn runtime_node_inputs(n: &dora_core::descriptor::RuntimeNode) -> BTreeMap { + n.operators + .iter() + .flat_map(|operator| { + operator.config.inputs.iter().map(|(input_id, mapping)| { + ( + DataId::from(format!("{}/{input_id}", operator.id)), + mapping.clone(), + ) + }) + }) + .collect() +} + +fn runtime_node_outputs(n: &dora_core::descriptor::RuntimeNode) -> BTreeSet { + n.operators + .iter() + .flat_map(|operator| { + operator + .config + .outputs + .iter() + .map(|output_id| DataId::from(format!("{}/{output_id}", operator.id))) + }) + .collect() +} + +async fn send_input_closed_events( + dataflow: &mut RunningDataflow, + inter_daemon_connections: &mut BTreeMap, + mut filter: F, + clock: &HLC, +) -> eyre::Result<()> +where + F: FnMut(&OutputId) -> bool, +{ + let local_node_inputs: BTreeSet<_> = dataflow + .mappings + .iter() + .filter(|(k, _)| filter(k)) + .flat_map(|(_, v)| v) + .cloned() + .collect(); + for (receiver_id, input_id) in &local_node_inputs { + close_input(dataflow, receiver_id, input_id, clock); + } + + let mut external_node_inputs = BTreeMap::new(); + for (output_id, mapping) in &mut dataflow.open_external_mappings { + if filter(output_id) { + external_node_inputs.append(mapping); + } + } + if !external_node_inputs.is_empty() { + for (target_machine, inputs) in external_node_inputs { + let event = Timestamped { + inner: InterDaemonEvent::InputsClosed { + dataflow_id: dataflow.id, + inputs, + }, + timestamp: clock.new_timestamp(), + }; + inter_daemon::send_inter_daemon_event( + &[target_machine], + inter_daemon_connections, + &event, + ) + .await + .wrap_err("failed to sent InputClosed event to remote receiver")?; + } + } + Ok(()) +} + +fn close_input( + dataflow: &mut RunningDataflow, + receiver_id: &NodeId, + input_id: &DataId, + clock: &HLC, +) { + if let Some(open_inputs) = dataflow.open_inputs.get_mut(receiver_id) { + if !open_inputs.remove(input_id) { + return; + } + } + if let Some(channel) = dataflow.subscribe_channels.get(receiver_id) { + let _ = send_with_timestamp( + channel, + daemon_messages::NodeEvent::InputClosed { + id: input_id.clone(), + }, + clock, + ); + + if dataflow.open_inputs(receiver_id).is_empty() { + let _ = + send_with_timestamp(channel, daemon_messages::NodeEvent::AllInputsClosed, clock); + } + } +} + +#[derive(Debug, Clone)] +struct RunningNode { + pid: u32, +} + +pub struct RunningDataflow { + id: Uuid, + /// Local nodes that are not started yet + pending_nodes: PendingNodes, + + subscribe_channels: HashMap>>, + drop_channels: HashMap>>, + mappings: HashMap>, + timers: BTreeMap>, + open_inputs: BTreeMap>, + running_nodes: BTreeMap, + + open_external_mappings: HashMap>>, + + pending_drop_tokens: HashMap, + + /// Keep handles to all timer tasks of this dataflow to cancel them on drop. + _timer_handles: Vec>, + stop_sent: bool, + + /// Used in `open_inputs`. + /// + /// TODO: replace this with a constant once `BTreeSet::new` is `const` on stable. 
+ empty_set: BTreeSet, +} + +impl RunningDataflow { + fn new(dataflow_id: Uuid, machine_id: String) -> RunningDataflow { + Self { + id: dataflow_id, + pending_nodes: PendingNodes::new(dataflow_id, machine_id), + subscribe_channels: HashMap::new(), + drop_channels: HashMap::new(), + mappings: HashMap::new(), + timers: BTreeMap::new(), + open_inputs: BTreeMap::new(), + running_nodes: BTreeMap::new(), + open_external_mappings: HashMap::new(), + pending_drop_tokens: HashMap::new(), + _timer_handles: Vec::new(), + stop_sent: false, + empty_set: BTreeSet::new(), + } + } + + async fn start( + &mut self, + events_tx: &mpsc::Sender>, + clock: &Arc, + ) -> eyre::Result<()> { + for interval in self.timers.keys().copied() { + let events_tx = events_tx.clone(); + let dataflow_id = self.id; + let clock = clock.clone(); + let task = async move { + let mut interval_stream = tokio::time::interval(interval); + let hlc = HLC::default(); + loop { + interval_stream.tick().await; + + let span = tracing::span!(tracing::Level::TRACE, "tick"); + let _ = span.enter(); + + let metadata = dora_core::message::Metadata::from_parameters( + hlc.new_timestamp(), + ArrowTypeInfo::empty(), + MetadataParameters { + watermark: 0, + deadline: 0, + #[cfg(feature = "telemetry")] + open_telemetry_context: serialize_context(&span.context()), + #[cfg(not(feature = "telemetry"))] + open_telemetry_context: "".into(), + }, + ); + + let event = Timestamped { + inner: DoraEvent::Timer { + dataflow_id, + interval, + metadata, + } + .into(), + timestamp: clock.new_timestamp(), + }; + if events_tx.send(event).await.is_err() { + break; + } + } + }; + let (task, handle) = task.remote_handle(); + tokio::spawn(task); + self._timer_handles.push(handle); + } + + Ok(()) + } + + async fn stop_all(&mut self, clock: &HLC, grace_duration: Option) { + for (_node_id, channel) in self.subscribe_channels.drain() { + let _ = send_with_timestamp(&channel, daemon_messages::NodeEvent::Stop, clock); + } + + let running_nodes = self.running_nodes.clone(); + tokio::spawn(async move { + let duration = grace_duration.unwrap_or(Duration::from_millis(500)); + tokio::time::sleep(duration).await; + let mut system = sysinfo::System::new(); + system.refresh_processes(); + + for (node, node_details) in running_nodes.iter() { + if let Some(process) = system.process(Pid::from(node_details.pid as usize)) { + process.kill(); + warn!( + "{node} was killed due to not stopping within the {:#?} grace period", + duration + ) + } + } + }); + self.stop_sent = true; + } + + fn open_inputs(&self, node_id: &NodeId) -> &BTreeSet { + self.open_inputs.get(node_id).unwrap_or(&self.empty_set) + } + + async fn check_drop_token(&mut self, token: DropToken, clock: &HLC) -> eyre::Result<()> { + match self.pending_drop_tokens.entry(token) { + std::collections::hash_map::Entry::Occupied(entry) => { + if entry.get().pending_nodes.is_empty() { + let (drop_token, info) = entry.remove_entry(); + let result = match self.drop_channels.get_mut(&info.owner) { + Some(channel) => send_with_timestamp( + channel, + daemon_messages::NodeDropEvent::OutputDropped { drop_token }, + clock, + ) + .wrap_err("send failed"), + None => Err(eyre!("no subscribe channel for node `{}`", &info.owner)), + }; + if let Err(err) = result.wrap_err_with(|| { + format!( + "failed to report drop token `{drop_token:?}` to owner `{}`", + &info.owner + ) + }) { + tracing::warn!("{err:?}"); + } + } + } + std::collections::hash_map::Entry::Vacant(_) => { + tracing::warn!("check_drop_token called with already closed token") + } + } + 
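`stop_all` above is deliberately two-phased: it first drains `subscribe_channels` and sends every node a `Stop` event, and only after the grace period (500 ms unless a `grace_duration` is given) force-kills processes that are still alive. A reduced sketch of that pattern follows; `kill` and `still_running` are hypothetical stand-ins for the `sysinfo`-based process handling in the real code, and the sketch awaits the grace period inline instead of spawning a background task.

```rust
use std::time::Duration;

/// Hypothetical stand-in for the sysinfo-based kill used by the daemon.
fn kill(pid: u32) {
    println!("force-killing process {pid}");
}

async fn stop_with_grace_period(
    running: Vec<(String, u32)>,         // (node id, pid)
    mut send_stop: impl FnMut(&str),     // deliver the Stop event to a node
    still_running: impl Fn(u32) -> bool, // hypothetical liveness check
    grace_duration: Option<Duration>,
) {
    // Phase 1: ask every node to stop cooperatively.
    for (node, _) in &running {
        send_stop(node.as_str());
    }
    // Phase 2: after the grace period, kill whatever is still alive.
    let grace = grace_duration.unwrap_or(Duration::from_millis(500));
    tokio::time::sleep(grace).await;
    for (node, pid) in &running {
        if still_running(*pid) {
            eprintln!("{node} did not stop within the {grace:?} grace period");
            kill(*pid);
        }
    }
}

#[tokio::main]
async fn main() {
    stop_with_grace_period(
        vec![("camera".into(), 4242)],
        |node| println!("sent Stop to {node}"),
        |_| true, // pretend the process ignored the Stop event
        None,
    )
    .await;
}
```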
+ Ok(()) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct OutputId(NodeId, DataId); +type InputId = (NodeId, DataId); + +struct DropTokenInformation { + /// The node that created the associated drop token. + owner: NodeId, + /// Contains the set of pending nodes that still have access to the input + /// associated with a drop token. + pending_nodes: BTreeSet, +} + +#[derive(Debug)] +pub enum Event { + Node { + dataflow_id: DataflowId, + node_id: NodeId, + event: DaemonNodeEvent, + }, + Coordinator(CoordinatorEvent), + Daemon(InterDaemonEvent), + Dora(DoraEvent), + HeartbeatInterval, + CtrlC, +} + +impl From for Event { + fn from(event: DoraEvent) -> Self { + Event::Dora(event) + } +} + +#[derive(Debug)] +pub enum DaemonNodeEvent { + OutputsDone { + reply_sender: oneshot::Sender, + }, + Subscribe { + event_sender: UnboundedSender>, + reply_sender: oneshot::Sender, + }, + SubscribeDrop { + event_sender: UnboundedSender>, + reply_sender: oneshot::Sender, + }, + CloseOutputs { + outputs: Vec, + reply_sender: oneshot::Sender, + }, + SendOut { + output_id: DataId, + metadata: dora_core::message::Metadata, + data: Option, + }, + ReportDrop { + tokens: Vec, + }, + EventStreamDropped { + reply_sender: oneshot::Sender, + }, +} + +#[derive(Debug)] +pub enum DoraEvent { + Timer { + dataflow_id: DataflowId, + interval: Duration, + metadata: dora_core::message::Metadata, + }, + Logs { + dataflow_id: DataflowId, + output_id: OutputId, + message: DataMessage, + metadata: Metadata, + }, + SpawnedNodeResult { + dataflow_id: DataflowId, + node_id: NodeId, + exit_status: NodeExitStatus, + }, +} + +#[derive(Debug)] +pub enum NodeExitStatus { + Success, + IoError(io::Error), + ExitCode(i32), + Signal(i32), + Unknown, +} + +impl From> for NodeExitStatus { + fn from(result: Result) -> Self { + match result { + Ok(status) => { + if status.success() { + NodeExitStatus::Success + } else if let Some(code) = status.code() { + Self::ExitCode(code) + } else { + #[cfg(unix)] + { + use std::os::unix::process::ExitStatusExt; + if let Some(signal) = status.signal() { + return Self::Signal(signal); + } + } + Self::Unknown + } + } + Err(err) => Self::IoError(err), + } + } +} + +#[must_use] +enum RunStatus { + Continue, + Exit, +} + +fn send_with_timestamp( + sender: &UnboundedSender>, + event: T, + clock: &HLC, +) -> Result<(), mpsc::error::SendError>> { + sender.send(Timestamped { + inner: event, + timestamp: clock.new_timestamp(), + }) +} + +fn set_up_ctrlc_handler( + clock: Arc, +) -> Result>, eyre::ErrReport> { + let (ctrlc_tx, ctrlc_rx) = mpsc::channel(1); + + let mut ctrlc_sent = false; + ctrlc::set_handler(move || { + if ctrlc_sent { + tracing::warn!("received second ctrlc signal -> aborting immediately"); + std::process::abort(); + } else { + tracing::info!("received ctrlc signal"); + if ctrlc_tx + .blocking_send(Timestamped { + inner: Event::CtrlC, + timestamp: clock.new_timestamp(), + }) + .is_err() + { + tracing::error!("failed to report ctrl-c event to dora-coordinator"); + } + + ctrlc_sent = true; + } + }) + .wrap_err("failed to set ctrl-c handler")?; + + Ok(ReceiverStream::new(ctrlc_rx)) +} diff --git a/binaries/daemon/src/log.rs b/binaries/daemon/src/log.rs new file mode 100644 index 0000000000000000000000000000000000000000..55368a2380330f387f3477cc400154dadca83095 --- /dev/null +++ b/binaries/daemon/src/log.rs @@ -0,0 +1,9 @@ +use std::path::{Path, PathBuf}; + +use dora_core::config::NodeId; +use uuid::Uuid; + +pub fn log_path(working_dir: &Path, dataflow_id: &Uuid, node_id: &NodeId) -> 
PathBuf { + let dataflow_dir = working_dir.join("out").join(dataflow_id.to_string()); + dataflow_dir.join(format!("log_{node_id}.txt")) +} diff --git a/binaries/daemon/src/node_communication/mod.rs b/binaries/daemon/src/node_communication/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..0675367e1bc29a5f0e8a78cfe6eb6218244574b1 --- /dev/null +++ b/binaries/daemon/src/node_communication/mod.rs @@ -0,0 +1,571 @@ +use crate::{DaemonNodeEvent, Event}; +use dora_core::{ + config::{DataId, LocalCommunicationConfig, NodeId}, + daemon_messages::{ + DaemonCommunication, DaemonReply, DaemonRequest, DataflowId, NodeDropEvent, NodeEvent, + Timestamped, + }, + message::uhlc, +}; +use eyre::{eyre, Context}; +use futures::{future, task, Future}; +use shared_memory_server::{ShmemConf, ShmemServer}; +use std::{ + collections::{BTreeMap, VecDeque}, + mem, + net::Ipv4Addr, + sync::Arc, + task::Poll, +}; +use tokio::{ + net::TcpListener, + sync::{ + mpsc::{self, UnboundedReceiver}, + oneshot, + }, +}; + +// TODO unify and avoid duplication; +pub mod shmem; +pub mod tcp; + +pub async fn spawn_listener_loop( + dataflow_id: &DataflowId, + node_id: &NodeId, + daemon_tx: &mpsc::Sender>, + config: LocalCommunicationConfig, + queue_sizes: BTreeMap, + clock: Arc, +) -> eyre::Result { + match config { + LocalCommunicationConfig::Tcp => { + let localhost = Ipv4Addr::new(127, 0, 0, 1); + let socket = match TcpListener::bind((localhost, 0)).await { + Ok(socket) => socket, + Err(err) => { + return Err( + eyre::Report::new(err).wrap_err("failed to create local TCP listener") + ) + } + }; + let socket_addr = socket + .local_addr() + .wrap_err("failed to get local addr of socket")?; + + let event_loop_node_id = format!("{dataflow_id}/{node_id}"); + let daemon_tx = daemon_tx.clone(); + tokio::spawn(async move { + tcp::listener_loop(socket, daemon_tx, queue_sizes, clock).await; + tracing::debug!("event listener loop finished for `{event_loop_node_id}`"); + }); + + Ok(DaemonCommunication::Tcp { socket_addr }) + } + LocalCommunicationConfig::Shmem => { + let daemon_control_region = ShmemConf::new() + .size(4096) + .create() + .wrap_err("failed to allocate daemon_control_region")?; + let daemon_events_region = ShmemConf::new() + .size(4096) + .create() + .wrap_err("failed to allocate daemon_events_region")?; + let daemon_drop_region = ShmemConf::new() + .size(4096) + .create() + .wrap_err("failed to allocate daemon_drop_region")?; + let daemon_events_close_region = ShmemConf::new() + .size(4096) + .create() + .wrap_err("failed to allocate daemon_drop_region")?; + let daemon_control_region_id = daemon_control_region.get_os_id().to_owned(); + let daemon_events_region_id = daemon_events_region.get_os_id().to_owned(); + let daemon_drop_region_id = daemon_drop_region.get_os_id().to_owned(); + let daemon_events_close_region_id = daemon_events_close_region.get_os_id().to_owned(); + + { + let server = unsafe { ShmemServer::new(daemon_control_region) } + .wrap_err("failed to create control server")?; + let daemon_tx = daemon_tx.clone(); + let queue_sizes = queue_sizes.clone(); + let clock = clock.clone(); + tokio::spawn(shmem::listener_loop(server, daemon_tx, queue_sizes, clock)); + } + + { + let server = unsafe { ShmemServer::new(daemon_events_region) } + .wrap_err("failed to create events server")?; + let event_loop_node_id = format!("{dataflow_id}/{node_id}"); + let daemon_tx = daemon_tx.clone(); + let queue_sizes = queue_sizes.clone(); + let clock = clock.clone(); + tokio::task::spawn(async move { + 
shmem::listener_loop(server, daemon_tx, queue_sizes, clock).await; + tracing::debug!("event listener loop finished for `{event_loop_node_id}`"); + }); + } + + { + let server = unsafe { ShmemServer::new(daemon_drop_region) } + .wrap_err("failed to create drop server")?; + let drop_loop_node_id = format!("{dataflow_id}/{node_id}"); + let daemon_tx = daemon_tx.clone(); + let queue_sizes = queue_sizes.clone(); + let clock = clock.clone(); + tokio::task::spawn(async move { + shmem::listener_loop(server, daemon_tx, queue_sizes, clock).await; + tracing::debug!("drop listener loop finished for `{drop_loop_node_id}`"); + }); + } + + { + let server = unsafe { ShmemServer::new(daemon_events_close_region) } + .wrap_err("failed to create events close server")?; + let drop_loop_node_id = format!("{dataflow_id}/{node_id}"); + let daemon_tx = daemon_tx.clone(); + let clock = clock.clone(); + tokio::task::spawn(async move { + shmem::listener_loop(server, daemon_tx, queue_sizes, clock).await; + tracing::debug!( + "events close listener loop finished for `{drop_loop_node_id}`" + ); + }); + } + + Ok(DaemonCommunication::Shmem { + daemon_control_region_id, + daemon_events_region_id, + daemon_drop_region_id, + daemon_events_close_region_id, + }) + } + } +} + +struct Listener { + dataflow_id: DataflowId, + node_id: NodeId, + daemon_tx: mpsc::Sender>, + subscribed_events: Option>>, + subscribed_drop_events: Option>>, + queue: VecDeque>>>, + queue_sizes: BTreeMap, + clock: Arc, +} + +impl Listener { + pub(crate) async fn run( + mut connection: C, + daemon_tx: mpsc::Sender>, + queue_sizes: BTreeMap, + hlc: Arc, + ) { + // receive the first message + let message = match connection + .receive_message() + .await + .wrap_err("failed to receive register message") + { + Ok(Some(m)) => m, + Ok(None) => { + tracing::info!("channel disconnected before register message"); + return; + } // disconnected + Err(err) => { + tracing::info!("{err:?}"); + return; + } + }; + + if let Err(err) = hlc.update_with_timestamp(&message.timestamp) { + tracing::warn!("failed to update HLC: {err}"); + } + + match message.inner { + DaemonRequest::Register { + dataflow_id, + node_id, + dora_version: node_api_version, + } => { + let daemon_version = env!("CARGO_PKG_VERSION"); + let result = if node_api_version == daemon_version { + Ok(()) + } else { + Err(format!( + "version mismatch: node API v{node_api_version} is not compatible \ + with daemon v{daemon_version}" + )) + }; + let send_result = connection + .send_reply(DaemonReply::Result(result.clone())) + .await + .wrap_err("failed to send register reply"); + match (result, send_result) { + (Ok(()), Ok(())) => { + let mut listener = Listener { + dataflow_id, + node_id, + daemon_tx, + subscribed_events: None, + subscribed_drop_events: None, + queue_sizes, + queue: VecDeque::new(), + clock: hlc.clone(), + }; + match listener + .run_inner(connection) + .await + .wrap_err("listener failed") + { + Ok(()) => {} + Err(err) => tracing::error!("{err:?}"), + } + } + (Err(err), _) => { + tracing::warn!("failed to register node {dataflow_id}/{node_id}: {err}"); + } + (Ok(()), Err(err)) => { + tracing::warn!( + "failed send register reply to node {dataflow_id}/{node_id}: {err:?}" + ); + } + } + } + other => { + tracing::warn!("expected register message, got `{other:?}`"); + let reply = DaemonReply::Result(Err("must send register message first".into())); + if let Err(err) = connection + .send_reply(reply) + .await + .wrap_err("failed to send reply") + { + tracing::warn!("{err:?}"); + } + } + } + } + + async 
fn run_inner(&mut self, mut connection: C) -> eyre::Result<()> { + loop { + let mut next_message = connection.receive_message(); + let message = loop { + let next_event = self.next_event(); + let event = match future::select(next_event, next_message).await { + future::Either::Left((event, n)) => { + next_message = n; + event + } + future::Either::Right((message, _)) => break message, + }; + + self.queue.push_back(Box::new(Some(event))); + self.handle_events().await?; + }; + + match message.wrap_err("failed to receive DaemonRequest") { + Ok(Some(message)) => { + if let Err(err) = self.handle_message(message, &mut connection).await { + tracing::warn!("{err:?}"); + } + } + Err(err) => { + tracing::warn!("{err:?}"); + } + Ok(None) => { + break; // disconnected + } + } + } + Ok(()) + } + + async fn handle_events(&mut self) -> eyre::Result<()> { + if let Some(events) = &mut self.subscribed_events { + while let Ok(event) = events.try_recv() { + self.queue.push_back(Box::new(Some(event))); + } + + // drop oldest input events to maintain max queue length queue + self.drop_oldest_inputs().await?; + } + Ok(()) + } + + #[tracing::instrument(skip(self), fields(%self.node_id), level = "trace")] + async fn drop_oldest_inputs(&mut self) -> Result<(), eyre::ErrReport> { + let mut queue_size_remaining = self.queue_sizes.clone(); + let mut dropped = 0; + let mut drop_tokens = Vec::new(); + + // iterate over queued events, newest first + for event in self.queue.iter_mut().rev() { + let Some(Timestamped { + inner: NodeEvent::Input { id, data, .. }, + .. + }) = event.as_mut() + else { + continue; + }; + match queue_size_remaining.get_mut(id) { + Some(0) => { + dropped += 1; + if let Some(drop_token) = data.as_ref().and_then(|d| d.drop_token()) { + drop_tokens.push(drop_token); + } + *event.as_mut() = None; + } + Some(size_remaining) => { + *size_remaining = size_remaining.saturating_sub(1); + } + None => { + tracing::warn!("no queue size known for received input `{id}`"); + } + } + } + self.report_drop_tokens(drop_tokens).await?; + + if dropped > 0 { + tracing::debug!( + "dropped {dropped} inputs of node `{}` because event queue was too full", + self.node_id + ); + } + Ok(()) + } + + #[tracing::instrument(skip(self, connection), fields(%self.dataflow_id, %self.node_id), level = "trace")] + async fn handle_message( + &mut self, + message: Timestamped, + connection: &mut C, + ) -> eyre::Result<()> { + let timestamp = message.timestamp; + if let Err(err) = self.clock.update_with_timestamp(×tamp) { + tracing::warn!("failed to update HLC: {err}"); + } + match message.inner { + DaemonRequest::Register { .. } => { + let reply = DaemonReply::Result(Err("unexpected register message".into())); + self.send_reply(reply, connection) + .await + .wrap_err("failed to send register reply")?; + } + DaemonRequest::OutputsDone => { + let (reply_sender, reply) = oneshot::channel(); + self.process_daemon_event( + DaemonNodeEvent::OutputsDone { reply_sender }, + Some(reply), + connection, + ) + .await? + } + DaemonRequest::CloseOutputs(outputs) => { + let (reply_sender, reply) = oneshot::channel(); + self.process_daemon_event( + DaemonNodeEvent::CloseOutputs { + outputs, + reply_sender, + }, + Some(reply), + connection, + ) + .await? 
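`drop_oldest_inputs` above walks the queued events newest-first, spends a per-input budget taken from `queue_sizes`, and blanks out any older input events that exceed it, collecting their drop tokens so the backing memory can be released. A standalone sketch of that pruning strategy, simplified to plain strings and without drop-token reporting:

```rust
use std::collections::{BTreeMap, VecDeque};

/// Keep at most `queue_sizes[id]` of the newest queued inputs per input id,
/// dropping older ones. Returns how many entries were dropped.
fn drop_oldest(
    queue: &mut VecDeque<Option<String>>,
    queue_sizes: &BTreeMap<String, usize>,
) -> usize {
    let mut remaining = queue_sizes.clone();
    let mut dropped = 0;
    // Iterate newest-first so the budget is spent on the most recent inputs.
    for slot in queue.iter_mut().rev() {
        let Some(id) = slot.as_ref() else { continue };
        let over_budget = match remaining.get_mut(id) {
            Some(0) => true,
            Some(budget) => {
                *budget -= 1;
                false
            }
            None => false, // unknown input id: leave it untouched
        };
        if over_budget {
            *slot = None; // this (older) input is dropped
            dropped += 1;
        }
    }
    queue.retain(|slot| slot.is_some());
    dropped
}

fn main() {
    // Oldest entry at the front, newest at the back.
    let mut queue: VecDeque<_> = ["image", "image", "image", "tick"]
        .into_iter()
        .map(|s| Some(s.to_string()))
        .collect();
    let sizes = BTreeMap::from([("image".to_string(), 1), ("tick".to_string(), 10)]);
    assert_eq!(drop_oldest(&mut queue, &sizes), 2); // only the newest "image" survives
    assert_eq!(queue.len(), 2);
}
```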
+ } + DaemonRequest::SendMessage { + output_id, + metadata, + data, + } => { + let event = crate::DaemonNodeEvent::SendOut { + output_id, + metadata, + data, + }; + self.process_daemon_event(event, None, connection).await?; + } + DaemonRequest::Subscribe => { + let (tx, rx) = mpsc::unbounded_channel(); + let (reply_sender, reply) = oneshot::channel(); + self.process_daemon_event( + DaemonNodeEvent::Subscribe { + event_sender: tx, + reply_sender, + }, + Some(reply), + connection, + ) + .await?; + self.subscribed_events = Some(rx); + } + DaemonRequest::SubscribeDrop => { + let (tx, rx) = mpsc::unbounded_channel(); + let (reply_sender, reply) = oneshot::channel(); + self.process_daemon_event( + DaemonNodeEvent::SubscribeDrop { + event_sender: tx, + reply_sender, + }, + Some(reply), + connection, + ) + .await?; + self.subscribed_drop_events = Some(rx); + } + DaemonRequest::NextEvent { drop_tokens } => { + self.report_drop_tokens(drop_tokens).await?; + + // try to take the queued events first + let queued_events: Vec<_> = mem::take(&mut self.queue) + .into_iter() + .filter_map(|e| *e) + .collect(); + let reply = if queued_events.is_empty() { + match self.subscribed_events.as_mut() { + // wait for next event + Some(events) => match events.recv().await { + Some(event) => DaemonReply::NextEvents(vec![event]), + None => DaemonReply::NextEvents(vec![]), + }, + None => { + DaemonReply::Result(Err("Ignoring event request because no subscribe \ + message was sent yet" + .into())) + } + } + } else { + DaemonReply::NextEvents(queued_events) + }; + + self.send_reply(reply.clone(), connection) + .await + .wrap_err_with(|| format!("failed to send NextEvent reply: {reply:?}"))?; + } + DaemonRequest::ReportDropTokens { drop_tokens } => { + self.report_drop_tokens(drop_tokens).await?; + + self.send_reply(DaemonReply::Empty, connection) + .await + .wrap_err("failed to send ReportDropTokens reply")?; + } + DaemonRequest::NextFinishedDropTokens => { + let reply = match self.subscribed_drop_events.as_mut() { + // wait for next event + Some(events) => match events.recv().await { + Some(event) => DaemonReply::NextDropEvents(vec![event]), + None => DaemonReply::NextDropEvents(vec![]), + }, + None => DaemonReply::Result(Err("Ignoring event request because no drop \ + subscribe message was sent yet" + .into())), + }; + + self.send_reply(reply.clone(), connection) + .await + .wrap_err_with(|| { + format!("failed to send NextFinishedDropTokens reply: {reply:?}") + })?; + } + DaemonRequest::EventStreamDropped => { + let (reply_sender, reply) = oneshot::channel(); + self.process_daemon_event( + DaemonNodeEvent::EventStreamDropped { reply_sender }, + Some(reply), + connection, + ) + .await?; + } + } + Ok(()) + } + + async fn report_drop_tokens( + &mut self, + drop_tokens: Vec, + ) -> eyre::Result<()> { + if !drop_tokens.is_empty() { + let event = Event::Node { + dataflow_id: self.dataflow_id, + node_id: self.node_id.clone(), + event: DaemonNodeEvent::ReportDrop { + tokens: drop_tokens, + }, + }; + let event = Timestamped { + inner: event, + timestamp: self.clock.new_timestamp(), + }; + self.daemon_tx + .send(event) + .await + .map_err(|_| eyre!("failed to report drop tokens to daemon"))?; + } + Ok(()) + } + + async fn process_daemon_event( + &mut self, + event: DaemonNodeEvent, + reply: Option>, + connection: &mut C, + ) -> eyre::Result<()> { + // send NodeEvent to daemon main loop + let event = Event::Node { + dataflow_id: self.dataflow_id, + node_id: self.node_id.clone(), + event, + }; + let event = Timestamped { + inner: 
event, + timestamp: self.clock.new_timestamp(), + }; + self.daemon_tx + .send(event) + .await + .map_err(|_| eyre!("failed to send event to daemon"))?; + let reply = if let Some(reply) = reply { + reply + .await + .map_err(|_| eyre!("failed to receive reply from daemon"))? + } else { + DaemonReply::Empty + }; + self.send_reply(reply, connection).await?; + Ok(()) + } + + async fn send_reply( + &mut self, + reply: DaemonReply, + connection: &mut C, + ) -> eyre::Result<()> { + connection + .send_reply(reply) + .await + .wrap_err_with(|| format!("failed to send reply to node `{}`", self.node_id)) + } + + /// Awaits the next subscribed event if any. Never resolves if the event channel is closed. + /// + /// This is similar to `self.subscribed_events.recv()`. The difference is that the future + /// does not return `None` when the channel is closed and instead stays pending forever. + /// This behavior can be useful when waiting for multiple event sources at once. + fn next_event(&mut self) -> impl Future> + Unpin + '_ { + let poll = |cx: &mut task::Context<'_>| { + if let Some(events) = &mut self.subscribed_events { + match events.poll_recv(cx) { + Poll::Ready(Some(event)) => Poll::Ready(event), + Poll::Ready(None) | Poll::Pending => Poll::Pending, + } + } else { + Poll::Pending + } + }; + future::poll_fn(poll) + } +} + +#[async_trait::async_trait] +trait Connection { + async fn receive_message(&mut self) -> eyre::Result>>; + async fn send_reply(&mut self, message: DaemonReply) -> eyre::Result<()>; +} diff --git a/binaries/daemon/src/node_communication/shmem.rs b/binaries/daemon/src/node_communication/shmem.rs new file mode 100644 index 0000000000000000000000000000000000000000..212ae19d184f8739907ac4069481a91f4f65af26 --- /dev/null +++ b/binaries/daemon/src/node_communication/shmem.rs @@ -0,0 +1,82 @@ +use std::{collections::BTreeMap, sync::Arc}; + +use super::{Connection, Listener}; +use crate::Event; +use dora_core::{ + config::DataId, + daemon_messages::{DaemonReply, DaemonRequest, Timestamped}, + message::uhlc::HLC, +}; +use eyre::eyre; +use shared_memory_server::ShmemServer; +use tokio::sync::{mpsc, oneshot}; + +#[tracing::instrument(skip(server, daemon_tx, clock), level = "trace")] +pub async fn listener_loop( + mut server: ShmemServer, DaemonReply>, + daemon_tx: mpsc::Sender>, + queue_sizes: BTreeMap, + clock: Arc, +) { + let (tx, rx) = flume::bounded(0); + tokio::task::spawn_blocking(move || { + while let Ok(operation) = rx.recv() { + match operation { + Operation::Receive(sender) => { + if sender.send(server.listen()).is_err() { + break; + } + } + Operation::Send { + message, + result_sender, + } => { + let result = server.send_reply(&message); + if result_sender.send(result).is_err() { + break; + } + } + } + } + }); + let connection = ShmemConnection(tx); + Listener::run(connection, daemon_tx, queue_sizes, clock).await +} + +enum Operation { + Receive(oneshot::Sender>>>), + Send { + message: DaemonReply, + result_sender: oneshot::Sender>, + }, +} + +struct ShmemConnection(flume::Sender); + +#[async_trait::async_trait] +impl Connection for ShmemConnection { + async fn receive_message(&mut self) -> eyre::Result>> { + let (tx, rx) = oneshot::channel(); + self.0 + .send_async(Operation::Receive(tx)) + .await + .map_err(|_| eyre!("failed send receive request to ShmemServer"))?; + rx.await + .map_err(|_| eyre!("failed to receive from ShmemServer")) + .and_then(|r| r) + } + + async fn send_reply(&mut self, reply: DaemonReply) -> eyre::Result<()> { + let (tx, rx) = oneshot::channel(); + self.0 
+ .send_async(Operation::Send { + message: reply, + result_sender: tx, + }) + .await + .map_err(|_| eyre!("failed send send request to ShmemServer"))?; + rx.await + .map_err(|_| eyre!("failed to receive from ShmemServer")) + .and_then(|r| r) + } +} diff --git a/binaries/daemon/src/node_communication/tcp.rs b/binaries/daemon/src/node_communication/tcp.rs new file mode 100644 index 0000000000000000000000000000000000000000..c4d6122cc380146e5c51489ab3a7321c06b1a5d1 --- /dev/null +++ b/binaries/daemon/src/node_communication/tcp.rs @@ -0,0 +1,95 @@ +use std::{collections::BTreeMap, io::ErrorKind, sync::Arc}; + +use super::{Connection, Listener}; +use crate::{ + tcp_utils::{tcp_receive, tcp_send}, + Event, +}; +use dora_core::{ + config::DataId, + daemon_messages::{DaemonReply, DaemonRequest, Timestamped}, + message::uhlc::HLC, +}; +use eyre::Context; +use tokio::{ + net::{TcpListener, TcpStream}, + sync::mpsc, +}; + +#[tracing::instrument(skip(listener, daemon_tx, clock), level = "trace")] +pub async fn listener_loop( + listener: TcpListener, + daemon_tx: mpsc::Sender>, + queue_sizes: BTreeMap, + clock: Arc, +) { + loop { + match listener + .accept() + .await + .wrap_err("failed to accept new connection") + { + Err(err) => { + tracing::info!("{err}"); + } + Ok((connection, _)) => { + tokio::spawn(handle_connection_loop( + connection, + daemon_tx.clone(), + queue_sizes.clone(), + clock.clone(), + )); + } + } + } +} + +#[tracing::instrument(skip(connection, daemon_tx, clock), level = "trace")] +async fn handle_connection_loop( + connection: TcpStream, + daemon_tx: mpsc::Sender>, + queue_sizes: BTreeMap, + clock: Arc, +) { + if let Err(err) = connection.set_nodelay(true) { + tracing::warn!("failed to set nodelay for connection: {err}"); + } + + Listener::run(TcpConnection(connection), daemon_tx, queue_sizes, clock).await +} + +struct TcpConnection(TcpStream); + +#[async_trait::async_trait] +impl Connection for TcpConnection { + async fn receive_message(&mut self) -> eyre::Result>> { + let raw = match tcp_receive(&mut self.0).await { + Ok(raw) => raw, + Err(err) => match err.kind() { + ErrorKind::UnexpectedEof + | ErrorKind::ConnectionAborted + | ErrorKind::ConnectionReset => return Ok(None), + _other => { + return Err(err) + .context("unexpected I/O error while trying to receive DaemonRequest") + } + }, + }; + bincode::deserialize(&raw) + .wrap_err("failed to deserialize DaemonRequest") + .map(Some) + } + + async fn send_reply(&mut self, message: DaemonReply) -> eyre::Result<()> { + if matches!(message, DaemonReply::Empty) { + // don't send empty replies + return Ok(()); + } + let serialized = + bincode::serialize(&message).wrap_err("failed to serialize DaemonReply")?; + tcp_send(&mut self.0, &serialized) + .await + .wrap_err("failed to send DaemonReply")?; + Ok(()) + } +} diff --git a/binaries/daemon/src/pending.rs b/binaries/daemon/src/pending.rs new file mode 100644 index 0000000000000000000000000000000000000000..ccba1a5679fc9f191d506803fb9693b35aa1d633 --- /dev/null +++ b/binaries/daemon/src/pending.rs @@ -0,0 +1,187 @@ +use std::collections::{HashMap, HashSet}; + +use dora_core::{ + config::NodeId, + coordinator_messages::{CoordinatorRequest, DaemonEvent}, + daemon_messages::{DaemonReply, DataflowId, Timestamped}, + message::uhlc::{Timestamp, HLC}, +}; +use eyre::{bail, Context}; +use tokio::{net::TcpStream, sync::oneshot}; + +use crate::tcp_utils::tcp_send; + +pub struct PendingNodes { + dataflow_id: DataflowId, + machine_id: String, + + /// The local nodes that are still waiting to start. 
+ local_nodes: HashSet, + /// Whether there are external nodes for this dataflow. + external_nodes: bool, + + /// Used to synchronize node starts. + /// + /// Subscribe requests block the node until all other nodes are ready too. + waiting_subscribers: HashMap>, + /// List of nodes that finished before connecting to the dora daemon. + /// + /// If this list is non-empty, we should not start the dataflow at all. Instead, + /// we report an error to the other nodes. + exited_before_subscribe: HashSet, + + /// Whether the local init result was already reported to the coordinator. + reported_init_to_coordinator: bool, +} + +impl PendingNodes { + pub fn new(dataflow_id: DataflowId, machine_id: String) -> Self { + Self { + dataflow_id, + machine_id, + local_nodes: HashSet::new(), + external_nodes: false, + waiting_subscribers: HashMap::new(), + exited_before_subscribe: HashSet::new(), + reported_init_to_coordinator: false, + } + } + + pub fn insert(&mut self, node_id: NodeId) { + self.local_nodes.insert(node_id); + } + + pub fn set_external_nodes(&mut self, value: bool) { + self.external_nodes = value; + } + + pub async fn handle_node_subscription( + &mut self, + node_id: NodeId, + reply_sender: oneshot::Sender, + coordinator_connection: &mut Option, + clock: &HLC, + ) -> eyre::Result { + self.waiting_subscribers + .insert(node_id.clone(), reply_sender); + self.local_nodes.remove(&node_id); + + self.update_dataflow_status(coordinator_connection, clock) + .await + } + + pub async fn handle_node_stop( + &mut self, + node_id: &NodeId, + coordinator_connection: &mut Option, + clock: &HLC, + ) -> eyre::Result<()> { + if self.local_nodes.remove(node_id) { + tracing::warn!("node `{node_id}` exited before initializing dora connection"); + self.exited_before_subscribe.insert(node_id.clone()); + self.update_dataflow_status(coordinator_connection, clock) + .await?; + } + Ok(()) + } + + pub async fn handle_external_all_nodes_ready(&mut self, success: bool) -> eyre::Result<()> { + if !self.local_nodes.is_empty() { + bail!("received external `all_nodes_ready` event before local nodes were ready"); + } + let external_error = if success { + None + } else { + Some("some nodes failed to initialize on remote machines".to_string()) + }; + self.answer_subscribe_requests(external_error).await; + + Ok(()) + } + + async fn update_dataflow_status( + &mut self, + coordinator_connection: &mut Option, + clock: &HLC, + ) -> eyre::Result { + if self.local_nodes.is_empty() { + if self.external_nodes { + if !self.reported_init_to_coordinator { + self.report_nodes_ready(coordinator_connection, clock.new_timestamp()) + .await?; + self.reported_init_to_coordinator = true; + } + Ok(DataflowStatus::Pending) + } else { + self.answer_subscribe_requests(None).await; + Ok(DataflowStatus::AllNodesReady) + } + } else { + Ok(DataflowStatus::Pending) + } + } + + async fn answer_subscribe_requests(&mut self, external_error: Option) { + let result = if self.exited_before_subscribe.is_empty() { + match external_error { + Some(err) => Err(err), + None => Ok(()), + } + } else { + let node_id_message = if self.exited_before_subscribe.len() == 1 { + self.exited_before_subscribe + .iter() + .next() + .map(|node_id| node_id.to_string()) + .unwrap_or("".to_string()) + } else { + "".to_string() + }; + Err(format!( + "Some nodes exited before subscribing to dora: {:?}\n\n\ + This is typically happens when an initialization error occurs + in the node or operator. 
To check the output of the failed + nodes, run `dora logs {} {node_id_message}`.", + self.exited_before_subscribe, self.dataflow_id + )) + }; + // answer all subscribe requests + let subscribe_replies = std::mem::take(&mut self.waiting_subscribers); + for reply_sender in subscribe_replies.into_values() { + let _ = reply_sender.send(DaemonReply::Result(result.clone())); + } + } + + async fn report_nodes_ready( + &self, + coordinator_connection: &mut Option, + timestamp: Timestamp, + ) -> eyre::Result<()> { + let Some(connection) = coordinator_connection else { + bail!("no coordinator connection to send AllNodesReady"); + }; + + let success = self.exited_before_subscribe.is_empty(); + tracing::info!("all local nodes are ready (success = {success}), waiting for remote nodes"); + + let msg = serde_json::to_vec(&Timestamped { + inner: CoordinatorRequest::Event { + machine_id: self.machine_id.clone(), + event: DaemonEvent::AllNodesReady { + dataflow_id: self.dataflow_id, + success, + }, + }, + timestamp, + })?; + tcp_send(connection, &msg) + .await + .wrap_err("failed to send AllNodesReady message to dora-coordinator")?; + Ok(()) + } +} + +pub enum DataflowStatus { + AllNodesReady, + Pending, +} diff --git a/binaries/daemon/src/spawn.rs b/binaries/daemon/src/spawn.rs new file mode 100644 index 0000000000000000000000000000000000000000..f89df3e2409b0b1ddfa5be6f85f4bfbf26dd58bb --- /dev/null +++ b/binaries/daemon/src/spawn.rs @@ -0,0 +1,458 @@ +use crate::{ + log, node_communication::spawn_listener_loop, node_inputs, runtime_node_inputs, + runtime_node_outputs, DoraEvent, Event, NodeExitStatus, OutputId, +}; +use aligned_vec::{AVec, ConstAlign}; +use dora_arrow_convert::IntoArrow; +use dora_core::{ + config::{DataId, NodeRunConfig}, + daemon_messages::{DataMessage, DataflowId, NodeConfig, RuntimeConfig, Timestamped}, + descriptor::{ + resolve_path, source_is_url, Descriptor, OperatorDefinition, OperatorSource, PythonSource, + ResolvedNode, SHELL_SOURCE, + }, + get_python_path, + message::uhlc::HLC, +}; +use dora_download::download_file; +use dora_node_api::{ + arrow::array::ArrayData, + arrow_utils::{copy_array_into_sample, required_data_size}, + Metadata, +}; +use eyre::{ContextCompat, WrapErr}; +use std::{ + env::consts::EXE_EXTENSION, + path::{Path, PathBuf}, + process::Stdio, + sync::Arc, +}; +use tokio::{ + fs::File, + io::{AsyncBufReadExt, AsyncWriteExt}, + sync::{mpsc, oneshot}, +}; +use tracing::{debug, error}; + +/// clock is required for generating timestamps when dropping messages early because queue is full +pub async fn spawn_node( + dataflow_id: DataflowId, + working_dir: &Path, + node: ResolvedNode, + daemon_tx: mpsc::Sender>, + dataflow_descriptor: Descriptor, + clock: Arc, +) -> eyre::Result { + let node_id = node.id.clone(); + tracing::debug!("Spawning node `{dataflow_id}/{node_id}`"); + + let queue_sizes = node_inputs(&node) + .into_iter() + .map(|(k, v)| (k, v.queue_size.unwrap_or(10))) + .collect(); + let daemon_communication = spawn_listener_loop( + &dataflow_id, + &node_id, + &daemon_tx, + dataflow_descriptor.communication.local, + queue_sizes, + clock.clone(), + ) + .await?; + let send_stdout_to = node + .send_stdout_as() + .context("Could not resolve `send_stdout_as` configuration")?; + + let mut child = match node.kind { + dora_core::descriptor::CoreNodeKind::Custom(n) => { + let mut command = match n.source.as_str() { + SHELL_SOURCE => { + if cfg!(target_os = "windows") { + let mut cmd = tokio::process::Command::new("cmd"); + cmd.args(["/C", 
&n.args.clone().unwrap_or_default()]); + cmd + } else { + let mut cmd = tokio::process::Command::new("sh"); + cmd.args(["-c", &n.args.clone().unwrap_or_default()]); + cmd + } + } + source => { + let resolved_path = if source_is_url(source) { + // try to download the shared library + let target_path = Path::new("build") + .join(node_id.to_string()) + .with_extension(EXE_EXTENSION); + download_file(source, &target_path) + .await + .wrap_err("failed to download custom node")?; + target_path.clone() + } else { + resolve_path(source, working_dir).wrap_err_with(|| { + format!("failed to resolve node source `{}`", source) + })? + }; + + // If extension is .py, use python to run the script + let mut cmd = match resolved_path.extension().map(|ext| ext.to_str()) { + Some(Some("py")) => { + let python = get_python_path().context("Could not get python path")?; + tracing::info!("spawning: {:?} {}", &python, resolved_path.display()); + let mut cmd = tokio::process::Command::new(&python); + cmd.arg(&resolved_path); + cmd + } + _ => { + tracing::info!("spawning: {}", resolved_path.display()); + tokio::process::Command::new(&resolved_path) + } + }; + + if let Some(args) = &n.args { + cmd.args(args.split_ascii_whitespace()); + } + cmd + } + }; + + command.current_dir(working_dir); + command.stdin(Stdio::null()); + let node_config = NodeConfig { + dataflow_id, + node_id: node_id.clone(), + run_config: n.run_config.clone(), + daemon_communication, + dataflow_descriptor, + }; + + command.env( + "DORA_NODE_CONFIG", + serde_yaml::to_string(&node_config).wrap_err("failed to serialize node config")?, + ); + // Injecting the env variable defined in the `yaml` into + // the node runtime. + if let Some(envs) = node.env { + for (key, value) in envs { + command.env(key, value.to_string()); + } + } + if let Some(envs) = n.envs { + // node has some inner env variables -> add them too + for (key, value) in envs { + command.env(key, value.to_string()); + } + } + command + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .wrap_err_with(move || { + format!( + "failed to run `{}` with args `{}`", + n.source, + n.args.as_deref().unwrap_or_default(), + ) + })? + } + dora_core::descriptor::CoreNodeKind::Runtime(n) => { + let python_operators: Vec<&OperatorDefinition> = n + .operators + .iter() + .filter(|x| matches!(x.config.source, OperatorSource::Python { .. })) + .collect(); + + let other_operators = n + .operators + .iter() + .any(|x| !matches!(x.config.source, OperatorSource::Python { .. })); + + let mut command = if !python_operators.is_empty() && !other_operators { + // Use python to spawn runtime if there is a python operator + + // TODO: Handle multi-operator runtime once sub-interpreter is supported + if python_operators.len() > 2 { + eyre::bail!( + "Runtime currently only support one Python Operator. + This is because pyo4 sub-interpreter is not yet available. + See: https://github.com/PyO4/pyo3/issues/576" + ); + } + + let python_operator = python_operators + .first() + .context("Runtime had no operators definition.")?; + + if let OperatorSource::Python(PythonSource { + source: _, + conda_env: Some(conda_env), + }) = &python_operator.config.source + { + let conda = which::which("conda").context( + "failed to find `conda`, yet a `conda_env` was defined. 
Make sure that `conda` is available.", + )?; + let mut command = tokio::process::Command::new(conda); + command.args([ + "run", + "-n", + &conda_env, + "python", + "-c", + format!("import dora; dora.start_runtime() # {}", node.id).as_str(), + ]); + command + } else { + let python = get_python_path() + .context("Could not find python path when spawning runtime node")?; + let mut command = tokio::process::Command::new(python); + command.args([ + "-c", + format!("import dora; dora.start_runtime() # {}", node.id).as_str(), + ]); + command + } + } else if python_operators.is_empty() && other_operators { + let mut cmd = tokio::process::Command::new( + std::env::current_exe().wrap_err("failed to get current executable path")?, + ); + cmd.arg("runtime"); + cmd + } else { + eyre::bail!("Runtime can not mix Python Operator with other type of operator."); + }; + command.current_dir(working_dir); + + let runtime_config = RuntimeConfig { + node: NodeConfig { + dataflow_id, + node_id: node_id.clone(), + run_config: NodeRunConfig { + inputs: runtime_node_inputs(&n), + outputs: runtime_node_outputs(&n), + }, + daemon_communication, + dataflow_descriptor, + }, + operators: n.operators, + }; + command.env( + "DORA_RUNTIME_CONFIG", + serde_yaml::to_string(&runtime_config) + .wrap_err("failed to serialize runtime config")?, + ); + // Injecting the env variable defined in the `yaml` into + // the node runtime. + if let Some(envs) = node.env { + for (key, value) in envs { + command.env(key, value.to_string()); + } + } + + command + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .wrap_err(format!( + "failed to run runtime {}/{}", + runtime_config.node.dataflow_id, runtime_config.node.node_id + ))? + } + }; + + let dataflow_dir: PathBuf = working_dir.join("out").join(dataflow_id.to_string()); + if !dataflow_dir.exists() { + std::fs::create_dir_all(&dataflow_dir).context("could not create dataflow_dir")?; + } + let (tx, mut rx) = mpsc::channel(10); + let mut file = File::create(log::log_path(working_dir, &dataflow_id, &node_id)) + .await + .expect("Failed to create log file"); + let mut child_stdout = + tokio::io::BufReader::new(child.stdout.take().expect("failed to take stdout")); + let pid = child.id().unwrap(); + let stdout_tx = tx.clone(); + + // Stdout listener stream + tokio::spawn(async move { + let mut buffer = String::new(); + let mut finished = false; + while !finished { + let mut raw = Vec::new(); + finished = match child_stdout + .read_until(b'\n', &mut raw) + .await + .wrap_err_with(|| format!("failed to read stdout line from spawned node {node_id}")) + { + Ok(0) => true, + Ok(_) => false, + Err(err) => { + tracing::warn!("{err:?}"); + false + } + }; + + match String::from_utf8(raw) { + Ok(s) => buffer.push_str(&s), + Err(err) => { + let lossy = String::from_utf8_lossy(err.as_bytes()); + tracing::warn!( + "stdout not valid UTF-8 string (node {node_id}): {}: {lossy}", + err.utf8_error() + ); + buffer.push_str(&lossy) + } + }; + + if buffer.contains("TRACE") + || buffer.contains("INFO") + || buffer.contains("DEBUG") + || buffer.contains("WARN") + || buffer.contains("ERROR") + { + // tracing output, potentially multi-line -> keep reading following lines + // until double-newline + if !buffer.ends_with("\n\n") && !finished { + continue; + } + } + + // send the buffered lines + let lines = std::mem::take(&mut buffer); + let sent = stdout_tx.send(lines.clone()).await; + if sent.is_err() { + println!("Could not log: {lines}"); + } + } + }); + + let mut child_stderr = + 
tokio::io::BufReader::new(child.stderr.take().expect("failed to take stderr")); + + // Stderr listener stream + let stderr_tx = tx.clone(); + let node_id = node.id.clone(); + let uhlc = clock.clone(); + let daemon_tx_log = daemon_tx.clone(); + tokio::spawn(async move { + let mut buffer = String::new(); + let mut finished = false; + while !finished { + let mut raw = Vec::new(); + finished = match child_stderr + .read_until(b'\n', &mut raw) + .await + .wrap_err_with(|| format!("failed to read stderr line from spawned node {node_id}")) + { + Ok(0) => true, + Ok(_) => false, + Err(err) => { + tracing::warn!("{err:?}"); + true + } + }; + + match String::from_utf8(raw) { + Ok(s) => buffer.push_str(&s), + Err(err) => { + let lossy = String::from_utf8_lossy(err.as_bytes()); + tracing::warn!( + "stderr not valid UTF-8 string (node {node_id}): {}: {lossy}", + err.utf8_error() + ); + buffer.push_str(&lossy) + } + }; + + if buffer.starts_with("Traceback (most recent call last):") { + if !finished { + continue; + } else { + tracing::error!("{dataflow_id}/{}: \n{buffer}", node_id); + } + } + + // send the buffered lines + let lines = std::mem::take(&mut buffer); + let sent = stderr_tx.send(lines.clone()).await; + if sent.is_err() { + println!("Could not log: {lines}"); + } + } + }); + + let node_id = node.id.clone(); + let (log_finish_tx, log_finish_rx) = oneshot::channel(); + tokio::spawn(async move { + let exit_status = NodeExitStatus::from(child.wait().await); + let _ = log_finish_rx.await; + let event = DoraEvent::SpawnedNodeResult { + dataflow_id, + node_id, + exit_status, + } + .into(); + let event = Timestamped { + inner: event, + timestamp: clock.new_timestamp(), + }; + let _ = daemon_tx.send(event).await; + }); + + let node_id = node.id.clone(); + // Log to file stream. + tokio::spawn(async move { + while let Some(message) = rx.recv().await { + // If log is an output, we're sending the logs to the dataflow + if let Some(stdout_output_name) = &send_stdout_to { + // Convert logs to DataMessage + let array = message.into_arrow(); + + let array: ArrayData = array.into(); + let total_len = required_data_size(&array); + let mut sample: AVec> = AVec::__from_elem(128, 0, total_len); + + let type_info = copy_array_into_sample(&mut sample, &array); + + let metadata = Metadata::new(uhlc.new_timestamp(), type_info); + let output_id = OutputId( + node_id.clone(), + DataId::from(stdout_output_name.to_string()), + ); + let event = DoraEvent::Logs { + dataflow_id, + output_id, + metadata, + message: DataMessage::Vec(sample), + } + .into(); + let event = Timestamped { + inner: event, + timestamp: uhlc.new_timestamp(), + }; + let _ = daemon_tx_log.send(event).await; + } + + let _ = file + .write_all(message.as_bytes()) + .await + .map_err(|err| error!("Could not log {message} to file due to {err}")); + let formatted = message.lines().fold(String::default(), |mut output, line| { + output.push_str(" "); + output.push_str(line); + output.push('\n'); + output + }); + tracing::trace!("{dataflow_id}/{} logged:\n{formatted}", node.id.clone()); + // Make sure that all data has been synced to disk. 
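+ // `sync_all` flushes both the written bytes and the file metadata, so the
+ // per-node log file stays complete on disk even if the daemon or the node
+ // is killed right after this message was forwarded.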
+ let _ = file + .sync_all() + .await + .map_err(|err| error!("Could not sync logs to file due to {err}")); + } + let _ = log_finish_tx + .send(()) + .map_err(|_| error!("Could not inform that log file thread finished")); + }); + Ok(pid) +} diff --git a/binaries/daemon/src/tcp_utils.rs b/binaries/daemon/src/tcp_utils.rs new file mode 100644 index 0000000000000000000000000000000000000000..db327c58485ce48f1729c788f22e04fe5db3101f --- /dev/null +++ b/binaries/daemon/src/tcp_utils.rs @@ -0,0 +1,23 @@ +use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; + +pub async fn tcp_send( + connection: &mut (impl AsyncWrite + Unpin), + message: &[u8], +) -> std::io::Result<()> { + let len_raw = (message.len() as u64).to_le_bytes(); + connection.write_all(&len_raw).await?; + connection.write_all(message).await?; + connection.flush().await?; + Ok(()) +} + +pub async fn tcp_receive(connection: &mut (impl AsyncRead + Unpin)) -> std::io::Result> { + let reply_len = { + let mut raw = [0; 8]; + connection.read_exact(&mut raw).await?; + u64::from_le_bytes(raw) as usize + }; + let mut reply = vec![0; reply_len]; + connection.read_exact(&mut reply).await?; + Ok(reply) +} diff --git a/binaries/runtime/Cargo.toml b/binaries/runtime/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..2f6b923dc1c5a9adb30646080f24175ccc37c99e --- /dev/null +++ b/binaries/runtime/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "dora-runtime" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +dora-node-api = { workspace = true, default-features = false } +dora-operator-api-python = { workspace = true, optional = true } +dora-operator-api-types = { workspace = true } +dora-core = { workspace = true } +dora-tracing = { workspace = true, optional = true } +dora-metrics = { workspace = true, optional = true } +eyre = "0.6.8" +futures = "0.3.21" +futures-concurrency = "7.1.0" +libloading = "0.7.3" +serde_yaml = "0.8.23" +tokio = { version = "1.24.2", features = ["full"] } +tokio-stream = "0.1.8" +# pyo3-abi3 flag allow simpler linking. 
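+# (abi3-py37 builds against CPython's stable ABI, so one build of the runtime should work with any Python >= 3.7 without a per-version rebuild.)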
See: https://pyo3.rs/v0.13.2/building_and_distribution.html +pyo3 = { workspace = true, features = ["eyre", "abi3-py37"], optional = true } +tracing = "0.1.36" +dora-download = { workspace = true } +flume = "0.10.14" +tracing-opentelemetry = { version = "0.18.0", optional = true } +pythonize = { workspace = true, optional = true } +arrow = { workspace = true, features = ["ffi"] } +aligned-vec = "0.5.0" + +[features] +default = ["tracing", "metrics"] +tracing = ["dora-tracing"] +telemetry = ["tracing", "tracing-opentelemetry"] +metrics = ["dora-metrics"] +python = ["pyo3", "dora-operator-api-python", "pythonize", "arrow/pyarrow"] diff --git a/binaries/runtime/src/lib.rs b/binaries/runtime/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..308d59f172cea65f125837e926a54d2d7acf15f3 --- /dev/null +++ b/binaries/runtime/src/lib.rs @@ -0,0 +1,336 @@ +#![warn(unsafe_op_in_unsafe_fn)] + +use dora_core::{ + config::{DataId, OperatorId}, + daemon_messages::{NodeConfig, RuntimeConfig}, + descriptor::OperatorConfig, +}; +use dora_metrics::init_meter_provider; +use dora_node_api::{DoraNode, Event}; +use eyre::{bail, Context, Result}; +use futures::{Stream, StreamExt}; +use futures_concurrency::stream::Merge; +use operator::{run_operator, OperatorEvent, StopReason}; + +#[cfg(feature = "tracing")] +use dora_tracing::set_up_tracing; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + mem, +}; +use tokio::{ + runtime::Builder, + sync::{mpsc, oneshot}, +}; +use tokio_stream::wrappers::ReceiverStream; +mod operator; + +pub fn main() -> eyre::Result<()> { + let config: RuntimeConfig = { + let raw = std::env::var("DORA_RUNTIME_CONFIG") + .wrap_err("env variable DORA_RUNTIME_CONFIG must be set")?; + serde_yaml::from_str(&raw).context("failed to deserialize operator config")? 
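+ // DORA_RUNTIME_CONFIG is set by the daemon when it spawns this runtime
+ // process (see the spawn code in `binaries/daemon` above); it holds the
+ // YAML-serialized `RuntimeConfig`, i.e. the node config plus the operator list.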
+ }; + let RuntimeConfig { + node: config, + operators, + } = config; + let node_id = config.node_id.clone(); + #[cfg(feature = "tracing")] + set_up_tracing(&node_id.to_string()).context("failed to set up tracing subscriber")?; + + let dataflow_descriptor = config.dataflow_descriptor.clone(); + + let operator_definition = if operators.is_empty() { + bail!("no operators"); + } else if operators.len() > 1 { + bail!("multiple operators are not supported"); + } else { + let mut ops = operators; + ops.remove(0) + }; + + let (operator_events_tx, events) = mpsc::channel(1); + let operator_id = operator_definition.id.clone(); + let operator_events = ReceiverStream::new(events).map(move |event| RuntimeEvent::Operator { + id: operator_id.clone(), + event, + }); + + let tokio_runtime = Builder::new_current_thread() + .enable_all() + .build() + .wrap_err("Could not build a tokio runtime.")?; + + let mut operator_channels = HashMap::new(); + let queue_sizes = queue_sizes(&operator_definition.config); + let (operator_channel, incoming_events) = + operator::channel::channel(tokio_runtime.handle(), queue_sizes); + operator_channels.insert(operator_definition.id.clone(), operator_channel); + + tracing::info!("spawning main task"); + let operator_config = [( + operator_definition.id.clone(), + operator_definition.config.clone(), + )] + .into_iter() + .collect(); + let (init_done_tx, init_done) = oneshot::channel(); + let main_task = std::thread::spawn(move || -> Result<()> { + tokio_runtime.block_on(run( + operator_config, + config, + operator_events, + operator_channels, + init_done, + )) + }); + + let operator_id = operator_definition.id.clone(); + run_operator( + &node_id, + operator_definition, + incoming_events, + operator_events_tx, + init_done_tx, + &dataflow_descriptor, + ) + .wrap_err_with(|| format!("failed to run operator {operator_id}"))?; + + match main_task.join() { + Ok(result) => result.wrap_err("main task failed")?, + Err(panic) => std::panic::resume_unwind(panic), + } + + Ok(()) +} + +fn queue_sizes(config: &OperatorConfig) -> std::collections::BTreeMap { + let mut sizes = BTreeMap::new(); + for (input_id, input) in &config.inputs { + let queue_size = input.queue_size.unwrap_or(10); + sizes.insert(input_id.clone(), queue_size); + } + sizes +} + +#[tracing::instrument(skip(operator_events, operator_channels), level = "trace")] +async fn run( + operators: HashMap, + config: NodeConfig, + operator_events: impl Stream + Unpin, + mut operator_channels: HashMap>, + init_done: oneshot::Receiver>, +) -> eyre::Result<()> { + #[cfg(feature = "metrics")] + let _meter_provider = init_meter_provider(config.node_id.to_string()); + init_done + .await + .wrap_err("the `init_done` channel was closed unexpectedly")? 
+ .wrap_err("failed to init an operator")?; + tracing::info!("All operators are ready, starting runtime"); + + let (mut node, mut daemon_events) = DoraNode::init(config)?; + let (daemon_events_tx, daemon_event_stream) = flume::bounded(1); + tokio::task::spawn_blocking(move || { + while let Some(event) = daemon_events.recv() { + if daemon_events_tx.send(RuntimeEvent::Event(event)).is_err() { + break; + } + } + }); + let mut events = (operator_events, daemon_event_stream.into_stream()).merge(); + + let mut open_operator_inputs: HashMap<_, BTreeSet<_>> = operators + .iter() + .map(|(id, config)| (id, config.inputs.keys().collect())) + .collect(); + + while let Some(event) = events.next().await { + match event { + RuntimeEvent::Operator { + id: operator_id, + event, + } => { + match event { + OperatorEvent::Error(err) => { + bail!(err.wrap_err(format!( + "operator {}/{operator_id} raised an error", + node.id() + ))) + } + OperatorEvent::Panic(payload) => { + bail!("operator {operator_id} panicked: {payload:?}"); + } + OperatorEvent::Finished { reason } => { + if let StopReason::ExplicitStopAll = reason { + // let hlc = dora_core::message::uhlc::HLC::default(); + // let metadata = dora_core::message::Metadata::new(hlc.new_timestamp()); + // let data = metadata + // .serialize() + // .wrap_err("failed to serialize stop message")?; + todo!("instruct dora-daemon/dora-coordinator to stop other nodes"); + // manual_stop_publisher + // .publish(&data) + // .map_err(|err| eyre::eyre!(err)) + // .wrap_err("failed to send stop message")?; + // break; + } + + let Some(config) = operators.get(&operator_id) else { + tracing::warn!( + "received Finished event for unknown operator `{operator_id}`" + ); + continue; + }; + let outputs = config + .outputs + .iter() + .map(|output_id| operator_output_id(&operator_id, output_id)) + .collect(); + let result; + (node, result) = tokio::task::spawn_blocking(move || { + let result = node.close_outputs(outputs); + (node, result) + }) + .await + .wrap_err("failed to wait for close_outputs task")?; + result.wrap_err("failed to close outputs of finished operator")?; + + operator_channels.remove(&operator_id); + + if operator_channels.is_empty() { + break; + } + } + OperatorEvent::AllocateOutputSample { len, sample: tx } => { + let sample = node.allocate_data_sample(len); + if tx.send(sample).is_err() { + tracing::warn!("output sample requested, but operator {operator_id} exited already"); + } + } + OperatorEvent::Output { + output_id, + type_info, + parameters, + data, + } => { + let output_id = operator_output_id(&operator_id, &output_id); + let result; + (node, result) = tokio::task::spawn_blocking(move || { + let result = + node.send_output_sample(output_id, type_info, parameters, data); + (node, result) + }) + .await + .wrap_err("failed to wait for send_output task")?; + result.wrap_err("failed to send node output")?; + } + } + } + RuntimeEvent::Event(Event::Stop) => { + // forward stop event to all operators and close the event channels + for (_, channel) in operator_channels.drain() { + let _ = channel.send_async(Event::Stop).await; + } + } + RuntimeEvent::Event(Event::Reload { + operator_id: Some(operator_id), + }) => { + let _ = operator_channels + .get(&operator_id) + .unwrap() + .send_async(Event::Reload { + operator_id: Some(operator_id), + }) + .await; + } + RuntimeEvent::Event(Event::Reload { operator_id: None }) => { + tracing::warn!("Reloading runtime nodes is not supported"); + } + RuntimeEvent::Event(Event::Input { id, metadata, data }) => { + let 
Some((operator_id, input_id)) = id.as_str().split_once('/') else { + tracing::warn!("received non-operator input {id}"); + continue; + }; + let operator_id = OperatorId::from(operator_id.to_owned()); + let input_id = DataId::from(input_id.to_owned()); + let Some(operator_channel) = operator_channels.get(&operator_id) else { + tracing::warn!("received input {id} for unknown operator"); + continue; + }; + + if let Err(err) = operator_channel + .send_async(Event::Input { + id: input_id.clone(), + metadata, + data, + }) + .await + .wrap_err_with(|| { + format!("failed to send input `{input_id}` to operator `{operator_id}`") + }) + { + tracing::warn!("{err}"); + } + } + RuntimeEvent::Event(Event::InputClosed { id }) => { + let Some((operator_id, input_id)) = id.as_str().split_once('/') else { + tracing::warn!("received InputClosed event for non-operator input {id}"); + continue; + }; + let operator_id = OperatorId::from(operator_id.to_owned()); + let input_id = DataId::from(input_id.to_owned()); + + let Some(operator_channel) = operator_channels.get(&operator_id) else { + tracing::warn!("received input {id} for unknown operator"); + continue; + }; + if let Err(err) = operator_channel + .send_async(Event::InputClosed { + id: input_id.clone(), + }) + .await + .wrap_err_with(|| { + format!( + "failed to send InputClosed({input_id}) to operator `{operator_id}`" + ) + }) + { + tracing::warn!("{err}"); + } + + if let Some(open_inputs) = open_operator_inputs.get_mut(&operator_id) { + open_inputs.remove(&input_id); + if open_inputs.is_empty() { + // all inputs of the node were closed -> close its event channel + tracing::trace!("all inputs of operator {}/{operator_id} were closed -> closing event channel", node.id()); + open_operator_inputs.remove(&operator_id); + operator_channels.remove(&operator_id); + } + } + } + RuntimeEvent::Event(Event::Error(err)) => eyre::bail!("received error event: {err}"), + RuntimeEvent::Event(other) => { + tracing::warn!("received unknown event `{other:?}`"); + } + } + } + + mem::drop(events); + + Ok(()) +} + +fn operator_output_id(operator_id: &OperatorId, output_id: &DataId) -> DataId { + DataId::from(format!("{operator_id}/{output_id}")) +} + +#[derive(Debug)] +enum RuntimeEvent { + Operator { + id: OperatorId, + event: OperatorEvent, + }, + Event(Event), +} diff --git a/binaries/runtime/src/operator/channel.rs b/binaries/runtime/src/operator/channel.rs new file mode 100644 index 0000000000000000000000000000000000000000..eac20b73edc118d8907836dd21d8ec0dc9cc79c6 --- /dev/null +++ b/binaries/runtime/src/operator/channel.rs @@ -0,0 +1,128 @@ +use dora_core::config::DataId; +use dora_node_api::Event; +use futures::{ + future::{self, FusedFuture}, + FutureExt, +}; +use std::collections::{BTreeMap, VecDeque}; + +pub fn channel( + runtime: &tokio::runtime::Handle, + queue_sizes: BTreeMap, +) -> (flume::Sender, flume::Receiver) { + let (incoming_tx, incoming_rx) = flume::bounded(10); + let (outgoing_tx, outgoing_rx) = flume::bounded(0); + + runtime.spawn(async { + let mut buffer = InputBuffer::new(queue_sizes); + buffer.run(incoming_rx, outgoing_tx).await; + }); + + (incoming_tx, outgoing_rx) +} + +struct InputBuffer { + queue: VecDeque>, + queue_sizes: BTreeMap, +} + +impl InputBuffer { + pub fn new(queue_sizes: BTreeMap) -> Self { + Self { + queue: VecDeque::new(), + queue_sizes, + } + } + + pub async fn run(&mut self, incoming: flume::Receiver, outgoing: flume::Sender) { + let mut send_out_buf = future::Fuse::terminated(); + let mut incoming_closed = false; + loop { + let 
next_incoming = if incoming_closed { + future::Fuse::terminated() + } else { + incoming.recv_async().fuse() + }; + match future::select(next_incoming, send_out_buf).await { + future::Either::Left((event, mut send_out)) => { + match event { + Ok(event) => { + // received a new event -> push it to the queue + self.add_event(event); + + // if outgoing queue is empty, fill it again + if send_out.is_terminated() { + send_out = self.send_next_queued(&outgoing); + } + } + Err(flume::RecvError::Disconnected) => { + incoming_closed = true; + } + } + + // reassign the send_out future, which might be still in progress + send_out_buf = send_out; + } + future::Either::Right((send_result, _)) => match send_result { + Ok(()) => { + send_out_buf = self.send_next_queued(&outgoing); + } + Err(flume::SendError(_)) => break, + }, + }; + if incoming_closed && send_out_buf.is_terminated() && self.queue.is_empty() { + break; + } + } + } + + fn send_next_queued<'a>( + &mut self, + outgoing: &'a flume::Sender, + ) -> future::Fuse> { + loop { + match self.queue.pop_front() { + Some(Some(next)) => break outgoing.send_async(next).fuse(), + Some(None) => { + // dropped event, try again with next one + } + None => break future::Fuse::terminated(), + } + } + } + + fn add_event(&mut self, event: Event) { + self.queue.push_back(Some(event)); + + // drop oldest input events to maintain max queue length queue + self.drop_oldest_inputs(); + } + + fn drop_oldest_inputs(&mut self) { + let mut queue_size_remaining = self.queue_sizes.clone(); + let mut dropped = 0; + + // iterate over queued events, newest first + for event in self.queue.iter_mut().rev() { + let Some(Event::Input { id: input_id, .. }) = event.as_mut() else { + continue; + }; + match queue_size_remaining.get_mut(input_id) { + Some(0) => { + dropped += 1; + *event = None; + } + Some(size_remaining) => { + *size_remaining = size_remaining.saturating_sub(1); + } + None => { + tracing::warn!("no queue size known for received operator input `{input_id}`"); + } + } + } + + if dropped > 0 { + tracing::debug!("dropped {dropped} operator inputs because event queue was too full"); + } + } +} diff --git a/binaries/runtime/src/operator/mod.rs b/binaries/runtime/src/operator/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..b78fb939bde4a6b29ae2c9a49210a5ccceb28f7c --- /dev/null +++ b/binaries/runtime/src/operator/mod.rs @@ -0,0 +1,97 @@ +use dora_core::{ + config::{DataId, NodeId}, + descriptor::{Descriptor, OperatorDefinition, OperatorSource}, + message::{ArrowTypeInfo, MetadataParameters}, +}; +use dora_node_api::{DataSample, Event}; +use eyre::{Context, Result}; +use std::any::Any; +use tokio::sync::{mpsc::Sender, oneshot}; + +pub mod channel; +#[cfg(feature = "python")] +mod python; +mod shared_lib; + +#[allow(unused_variables)] +pub fn run_operator( + node_id: &NodeId, + operator_definition: OperatorDefinition, + incoming_events: flume::Receiver, + events_tx: Sender, + init_done: oneshot::Sender>, + dataflow_descriptor: &Descriptor, +) -> eyre::Result<()> { + match &operator_definition.config.source { + OperatorSource::SharedLibrary(source) => { + shared_lib::run( + node_id, + &operator_definition.id, + source, + events_tx, + incoming_events, + init_done, + ) + .wrap_err_with(|| { + format!( + "failed to spawn shared library operator for {}", + operator_definition.id + ) + })?; + } + #[allow(unused_variables)] + OperatorSource::Python(source) => { + #[cfg(feature = "python")] + python::run( + node_id, + &operator_definition.id, + source, + 
events_tx, + incoming_events, + init_done, + dataflow_descriptor, + ) + .wrap_err_with(|| { + format!( + "failed to spawn Python operator for {}", + operator_definition.id + ) + })?; + #[cfg(not(feature = "python"))] + tracing::error!( + "Dora runtime tried spawning Python Operator outside of python environment." + ); + } + OperatorSource::Wasm(_) => { + tracing::error!("WASM operators are not supported yet"); + } + } + Ok(()) +} + +#[derive(Debug)] +#[allow(dead_code)] +pub enum OperatorEvent { + AllocateOutputSample { + len: usize, + sample: oneshot::Sender>, + }, + Output { + output_id: DataId, + type_info: ArrowTypeInfo, + parameters: MetadataParameters, + data: Option, + }, + Error(eyre::Error), + Panic(Box), + Finished { + reason: StopReason, + }, +} + +#[derive(Debug)] +pub enum StopReason { + InputsClosed, + ExplicitStop, + ExplicitStopAll, +} diff --git a/binaries/runtime/src/operator/python.rs b/binaries/runtime/src/operator/python.rs new file mode 100644 index 0000000000000000000000000000000000000000..82d01ebb9c5e2b90cfc8a815cc40ecdf0f5a68c9 --- /dev/null +++ b/binaries/runtime/src/operator/python.rs @@ -0,0 +1,379 @@ +#![allow(clippy::borrow_deref_ref)] // clippy warns about code generated by #[pymethods] + +use super::{OperatorEvent, StopReason}; +use dora_core::{ + config::{NodeId, OperatorId}, + descriptor::{source_is_url, Descriptor, PythonSource}, +}; +use dora_download::download_file; +use dora_node_api::Event; +use dora_operator_api_python::PyEvent; +use dora_operator_api_types::DoraStatus; +use eyre::{bail, eyre, Context, Result}; +use pyo3::{ + pyclass, + types::{IntoPyDict, PyAnyMethods, PyDict, PyTracebackMethods}, + Py, PyAny, Python, +}; +use std::{ + panic::{catch_unwind, AssertUnwindSafe}, + path::Path, +}; +use tokio::sync::{mpsc::Sender, oneshot}; +use tracing::{error, field, span, warn}; + +fn traceback(err: pyo3::PyErr) -> eyre::Report { + let traceback = Python::with_gil(|py| err.traceback_bound(py).and_then(|t| t.format().ok())); + if let Some(traceback) = traceback { + eyre::eyre!("{traceback}\n{err}") + } else { + eyre::eyre!("{err}") + } +} + +#[tracing::instrument(skip(events_tx, incoming_events), level = "trace")] +pub fn run( + node_id: &NodeId, + operator_id: &OperatorId, + python_source: &PythonSource, + events_tx: Sender, + incoming_events: flume::Receiver, + init_done: oneshot::Sender>, + dataflow_descriptor: &Descriptor, +) -> eyre::Result<()> { + let path = if source_is_url(&python_source.source) { + let target_path = Path::new("build") + .join(node_id.to_string()) + .join(format!("{}.py", operator_id)); + // try to download the shared library + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build()?; + rt.block_on(download_file(&python_source.source, &target_path)) + .wrap_err("failed to download Python operator")?; + target_path + } else { + Path::new(&python_source.source).to_owned() + }; + + if !path.exists() { + bail!("No python file exists at {}", path.display()); + } + let path = path + .canonicalize() + .wrap_err_with(|| format!("no file found at `{}`", path.display()))?; + let module_name = path + .file_stem() + .ok_or_else(|| eyre!("module path has no file stem"))? 
+ .to_str() + .ok_or_else(|| eyre!("module file stem is not valid utf8"))?; + let path_parent = path.parent(); + + let send_output = SendOutputCallback { + events_tx: events_tx.clone(), + }; + + let init_operator = move |py: Python| { + if let Some(parent_path) = path_parent { + let parent_path = parent_path + .to_str() + .ok_or_else(|| eyre!("module path is not valid utf8"))?; + let sys = py + .import_bound("sys") + .wrap_err("failed to import `sys` module")?; + let sys_path = sys + .getattr("path") + .wrap_err("failed to import `sys.path` module")?; + let sys_path_append = sys_path + .getattr("append") + .wrap_err("`sys.path.append` was not found")?; + sys_path_append + .call1((parent_path,)) + .wrap_err("failed to append module path to python search path")?; + } + + let module = py.import_bound(module_name).map_err(traceback)?; + let operator_class = module + .getattr("Operator") + .wrap_err("no `Operator` class found in module")?; + + let locals = [("Operator", operator_class)].into_py_dict_bound(py); + let operator = py + .eval_bound("Operator()", None, Some(&locals)) + .map_err(traceback)?; + operator.setattr( + "dataflow_descriptor", + pythonize::pythonize(py, dataflow_descriptor)?, + )?; + + Result::<_, eyre::Report>::Ok(Py::from(operator)) + }; + + let python_runner = move || { + let mut operator = + match Python::with_gil(init_operator).wrap_err("failed to init python operator") { + Ok(op) => { + let _ = init_done.send(Ok(())); + op + } + Err(err) => { + let _ = init_done.send(Err(err)); + bail!("Could not init python operator") + } + }; + + let mut reload = false; + let reason = loop { + #[allow(unused_mut)] + let Ok(mut event) = incoming_events.recv() else { + break StopReason::InputsClosed; + }; + + if let Event::Reload { .. } = event { + reload = true; + // Reloading method + #[allow(clippy::blocks_in_conditions)] + match Python::with_gil(|py| -> Result> { + // Saving current state + let current_state = operator + .getattr(py, "__dict__") + .wrap_err("Could not retrieve current operator state")?; + let current_state = current_state + .extract::<&PyDict>(py) + .wrap_err("could not extract operator state as a PyDict")?; + // Reload module + let module = py + .import_bound(module_name) + .map_err(traceback) + .wrap_err(format!("Could not retrieve {module_name} while reloading"))?; + let importlib = py + .import_bound("importlib") + .wrap_err("failed to import `importlib` module")?; + let module = importlib + .call_method("reload", (module,), None) + .wrap_err(format!("Could not reload {module_name} while reloading"))?; + let reloaded_operator_class = module + .getattr("Operator") + .wrap_err("no `Operator` class found in module")?; + + // Create a new reloaded operator + let locals = [("Operator", reloaded_operator_class)].into_py_dict_bound(py); + let operator: Py = py + .eval_bound("Operator()", None, Some(&locals)) + .map_err(traceback) + .wrap_err("Could not initialize reloaded operator")? + .into(); + + // Replace initialized state with current state + operator + .getattr(py, "__dict__") + .wrap_err("Could not retrieve new operator state")? + .extract::<&PyDict>(py) + .wrap_err("could not extract new operator state as a PyDict")? 
+ .update(current_state.as_mapping()) + .wrap_err("could not restore operator state")?; + + Ok(operator) + }) { + Ok(reloaded_operator) => { + operator = reloaded_operator; + } + Err(err) => { + error!("Failed to reload operator.\n {err}"); + } + } + } + + let status = Python::with_gil(|py| -> Result { + let span = span!(tracing::Level::TRACE, "on_event", input_id = field::Empty); + let _ = span.enter(); + + // Add metadata context if we have a tracer and + // incoming input has some metadata. + #[cfg(feature = "telemetry")] + if let Event::Input { + id: input_id, + metadata, + .. + } = &mut event + { + use dora_tracing::telemetry::{deserialize_context, serialize_context}; + use tracing_opentelemetry::OpenTelemetrySpanExt; + span.record("input_id", input_id.as_str()); + + let cx = deserialize_context(&metadata.parameters.open_telemetry_context); + span.set_parent(cx); + let cx = span.context(); + let string_cx = serialize_context(&cx); + metadata.parameters.open_telemetry_context = string_cx; + } + + let py_event = PyEvent::from(event); + + let status_enum = operator + .call_method1(py, "on_event", (py_event, send_output.clone())) + .map_err(traceback); + match status_enum { + Ok(status_enum) => { + let status_val = Python::with_gil(|py| status_enum.getattr(py, "value")) + .wrap_err("on_event must have enum return value")?; + Python::with_gil(|py| status_val.extract(py)) + .wrap_err("on_event has invalid return value") + } + Err(err) => { + if reload { + // Allow error in hot reloading environment to help development. + warn!("{err}"); + Ok(DoraStatus::Continue as i32) + } else { + Err(err) + } + } + } + })?; + match status { + s if s == DoraStatus::Continue as i32 => {} // ok + s if s == DoraStatus::Stop as i32 => break StopReason::ExplicitStop, + s if s == DoraStatus::StopAll as i32 => break StopReason::ExplicitStopAll, + other => bail!("on_event returned invalid status {other}"), + } + }; + + // Dropping the operator using Python garbage collector. + // Locking the GIL for immediate release. + Python::with_gil(|_py| { + drop(operator); + }); + + Result::<_, eyre::Report>::Ok(reason) + }; + + let closure = AssertUnwindSafe(|| { + python_runner().wrap_err_with(|| format!("error in Python module at {}", path.display())) + }); + + match catch_unwind(closure) { + Ok(Ok(reason)) => { + let _ = events_tx.blocking_send(OperatorEvent::Finished { reason }); + } + Ok(Err(err)) => { + let _ = events_tx.blocking_send(OperatorEvent::Error(err)); + } + Err(panic) => { + let _ = events_tx.blocking_send(OperatorEvent::Panic(panic)); + } + } + + Ok(()) +} + +#[pyclass] +#[derive(Clone)] +struct SendOutputCallback { + events_tx: Sender, +} + +#[allow(unsafe_op_in_unsafe_fn)] +mod callback_impl { + + use crate::operator::OperatorEvent; + + use super::SendOutputCallback; + use aligned_vec::{AVec, ConstAlign}; + use arrow::{array::ArrayData, pyarrow::FromPyArrow}; + use dora_core::message::ArrowTypeInfo; + use dora_node_api::{ + arrow_utils::{copy_array_into_sample, required_data_size}, + ZERO_COPY_THRESHOLD, + }; + use dora_operator_api_python::pydict_to_metadata; + use dora_tracing::telemetry::deserialize_context; + use eyre::{eyre, Context, Result}; + use pyo3::{ + pymethods, + types::{PyBytes, PyBytesMethods, PyDict}, + Bound, PyObject, Python, + }; + use tokio::sync::oneshot; + use tracing::{field, span}; + use tracing_opentelemetry::OpenTelemetrySpanExt; + + /// Send an output from the operator: + /// - the first argument is the `output_id` as defined in your dataflow. 
+ /// - the second argument is the data as either bytes or pyarrow.Array for zero copy. + /// - the third argument is dora metadata if you want to link the tracing from one input into an output. + /// `e.g.: send_output("bbox", pa.array([100], type=pa.uint8()), dora_event["metadata"])` + #[pymethods] + impl SendOutputCallback { + fn __call__( + &mut self, + output: &str, + data: PyObject, + metadata: Option>, + py: Python, + ) -> Result<()> { + let parameters = pydict_to_metadata(metadata) + .wrap_err("failed to parse metadata")? + .into_owned(); + let span = span!( + tracing::Level::TRACE, + "send_output", + output_id = field::Empty + ); + span.record("output_id", output); + + let cx = deserialize_context(¶meters.open_telemetry_context); + span.set_parent(cx); + let _ = span.enter(); + + let allocate_sample = |data_len| { + if data_len > ZERO_COPY_THRESHOLD { + let (tx, rx) = oneshot::channel(); + self.events_tx + .blocking_send(OperatorEvent::AllocateOutputSample { + len: data_len, + sample: tx, + }) + .map_err(|_| eyre!("failed to send output to runtime"))?; + rx.blocking_recv() + .wrap_err("failed to request output sample")? + .wrap_err("failed to allocate output sample") + } else { + let avec: AVec> = AVec::__from_elem(128, 0, data_len); + + Ok(avec.into()) + } + }; + + let (sample, type_info) = if let Ok(py_bytes) = data.downcast_bound::(py) { + let data = py_bytes.as_bytes(); + let mut sample = allocate_sample(data.len())?; + sample.copy_from_slice(data); + (sample, ArrowTypeInfo::byte_array(data.len())) + } else if let Ok(arrow_array) = ArrayData::from_pyarrow_bound(data.bind(py)) { + let total_len = required_data_size(&arrow_array); + let mut sample = allocate_sample(total_len)?; + + let type_info = copy_array_into_sample(&mut sample, &arrow_array); + + (sample, type_info) + } else { + eyre::bail!("invalid `data` type, must by `PyBytes` or arrow array") + }; + + py.allow_threads(|| { + let event = OperatorEvent::Output { + output_id: output.to_owned().into(), + type_info, + parameters, + data: Some(sample), + }; + self.events_tx + .blocking_send(event) + .map_err(|_| eyre!("failed to send output to runtime")) + })?; + + Ok(()) + } + } +} diff --git a/binaries/runtime/src/operator/shared_lib.rs b/binaries/runtime/src/operator/shared_lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..984a760b93dbf86bb0005b711ba56c622d93a8fd --- /dev/null +++ b/binaries/runtime/src/operator/shared_lib.rs @@ -0,0 +1,297 @@ +use super::{OperatorEvent, StopReason}; +use aligned_vec::{AVec, ConstAlign}; +use dora_core::{ + adjust_shared_library_path, + config::{DataId, NodeId, OperatorId}, + descriptor::source_is_url, +}; +use dora_download::download_file; +use dora_node_api::{ + arrow_utils::{copy_array_into_sample, required_data_size}, + Event, MetadataParameters, +}; +use dora_operator_api_types::{ + safer_ffi::closure::ArcDynFn1, DoraDropOperator, DoraInitOperator, DoraInitResult, DoraOnEvent, + DoraResult, DoraStatus, Metadata, OnEventResult, Output, SendOutput, +}; +use eyre::{bail, eyre, Context, Result}; +use libloading::Symbol; +use std::{ + ffi::c_void, + panic::{catch_unwind, AssertUnwindSafe}, + path::Path, + sync::Arc, +}; +use tokio::sync::{mpsc::Sender, oneshot}; +use tracing::{field, span}; + +pub fn run( + node_id: &NodeId, + operator_id: &OperatorId, + source: &str, + events_tx: Sender, + incoming_events: flume::Receiver, + init_done: oneshot::Sender>, +) -> eyre::Result<()> { + let path = if source_is_url(source) { + let target_path = 
adjust_shared_library_path( + &Path::new("build") + .join(node_id.to_string()) + .join(operator_id.to_string()), + )?; + // try to download the shared library + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build()?; + rt.block_on(download_file(source, &target_path)) + .wrap_err("failed to download shared library operator")?; + target_path + } else { + adjust_shared_library_path(Path::new(source))? + }; + + let library = unsafe { + libloading::Library::new(&path) + .wrap_err_with(|| format!("failed to load shared library at `{}`", path.display()))? + }; + + let closure = AssertUnwindSafe(|| { + let bindings = Bindings::init(&library).context("failed to init operator")?; + + let operator = SharedLibraryOperator { + incoming_events, + bindings, + events_tx: events_tx.clone(), + }; + + operator.run(init_done) + }); + match catch_unwind(closure) { + Ok(Ok(reason)) => { + let _ = events_tx.blocking_send(OperatorEvent::Finished { reason }); + } + Ok(Err(err)) => { + let _ = events_tx.blocking_send(OperatorEvent::Error(err)); + } + Err(panic) => { + let _ = events_tx.blocking_send(OperatorEvent::Panic(panic)); + } + } + + Ok(()) +} + +struct SharedLibraryOperator<'lib> { + incoming_events: flume::Receiver, + events_tx: Sender, + + bindings: Bindings<'lib>, +} + +impl<'lib> SharedLibraryOperator<'lib> { + fn run(self, init_done: oneshot::Sender>) -> eyre::Result { + let operator_context = { + let DoraInitResult { + result, + operator_context, + } = unsafe { (self.bindings.init_operator.init_operator)() }; + let raw = match result.error { + Some(error) => { + let _ = init_done.send(Err(eyre!(error.to_string()))); + bail!("init_operator failed: {}", *error) + } + None => operator_context, + }; + OperatorContext { + raw, + drop_fn: self.bindings.drop_operator.clone(), + } + }; + + let _ = init_done.send(Ok(())); + + let send_output_closure = Arc::new(move |output: Output| { + let Output { + id: output_id, + data_array, + schema, + metadata: Metadata { + open_telemetry_context, + }, + } = output; + let parameters = MetadataParameters { + open_telemetry_context: open_telemetry_context.into(), + ..Default::default() + }; + + let arrow_array = match unsafe { arrow::ffi::from_ffi(data_array, &schema) } { + Ok(a) => a, + Err(err) => return DoraResult::from_error(err.to_string()), + }; + + let total_len = required_data_size(&arrow_array); + let mut sample: AVec> = AVec::__from_elem(128, 0, total_len); + + let type_info = copy_array_into_sample(&mut sample, &arrow_array); + + let event = OperatorEvent::Output { + output_id: DataId::from(String::from(output_id)), + type_info, + parameters, + data: Some(sample.into()), + }; + + let result = self + .events_tx + .blocking_send(event) + .map_err(|_| eyre!("failed to send output to runtime")); + + match result { + Ok(()) => DoraResult::SUCCESS, + Err(_) => DoraResult::from_error("runtime process closed unexpectedly".into()), + } + }); + + let reason = loop { + #[allow(unused_mut)] + let Ok(mut event) = self.incoming_events.recv() else { + break StopReason::InputsClosed; + }; + + let span = span!(tracing::Level::TRACE, "on_event", input_id = field::Empty); + let _ = span.enter(); + // Add metadata context if we have a tracer and + // incoming input has some metadata. + #[cfg(feature = "telemetry")] + if let Event::Input { + id: input_id, + metadata, + .. 
+ } = &mut event + { + use dora_tracing::telemetry::{deserialize_context, serialize_context}; + use tracing_opentelemetry::OpenTelemetrySpanExt; + span.record("input_id", input_id.as_str()); + + let cx = deserialize_context(&metadata.parameters.open_telemetry_context); + span.set_parent(cx); + let cx = span.context(); + let string_cx = serialize_context(&cx); + metadata.parameters.open_telemetry_context = string_cx; + } + + let mut operator_event = match event { + Event::Stop => dora_operator_api_types::RawEvent { + input: None, + input_closed: None, + stop: true, + error: None, + }, + Event::Input { + id: input_id, + metadata, + data, + } => { + let (data_array, schema) = arrow::ffi::to_ffi(&data.to_data())?; + + let operator_input = dora_operator_api_types::Input { + id: String::from(input_id).into(), + data_array: Some(data_array), + schema, + metadata: Metadata { + open_telemetry_context: metadata + .parameters + .open_telemetry_context + .into(), + }, + }; + dora_operator_api_types::RawEvent { + input: Some(Box::new(operator_input).into()), + input_closed: None, + stop: false, + error: None, + } + } + Event::InputClosed { id: input_id } => dora_operator_api_types::RawEvent { + input_closed: Some(input_id.to_string().into()), + input: None, + stop: false, + error: None, + }, + Event::Reload { .. } => { + // Reloading shared lib operator is not supported. See: https://github.com/dora-rs/dora/pull/239#discussion_r1154313139 + continue; + } + Event::Error(err) => dora_operator_api_types::RawEvent { + error: Some(err.into()), + input_closed: None, + input: None, + stop: false, + }, + other => { + tracing::warn!("unexpected event: {other:?}"); + continue; + } + }; + + let send_output = SendOutput { + send_output: ArcDynFn1::new(send_output_closure.clone()), + }; + let OnEventResult { + result: DoraResult { error }, + status, + } = unsafe { + (self.bindings.on_event.on_event)( + &mut operator_event, + &send_output, + operator_context.raw, + ) + }; + match error { + Some(error) => bail!("on_input failed: {}", *error), + None => match status { + DoraStatus::Continue => {} + DoraStatus::Stop => break StopReason::ExplicitStop, + DoraStatus::StopAll => break StopReason::ExplicitStopAll, + }, + } + }; + Ok(reason) + } +} + +struct OperatorContext<'lib> { + raw: *mut c_void, + drop_fn: Symbol<'lib, DoraDropOperator>, +} + +impl<'lib> Drop for OperatorContext<'lib> { + fn drop(&mut self) { + unsafe { (self.drop_fn.drop_operator)(self.raw) }; + } +} + +struct Bindings<'lib> { + init_operator: Symbol<'lib, DoraInitOperator>, + drop_operator: Symbol<'lib, DoraDropOperator>, + on_event: Symbol<'lib, DoraOnEvent>, +} + +impl<'lib> Bindings<'lib> { + fn init(library: &'lib libloading::Library) -> Result { + let bindings = unsafe { + Bindings { + init_operator: library + .get(b"dora_init_operator") + .wrap_err("failed to get `dora_init_operator`")?, + drop_operator: library + .get(b"dora_drop_operator") + .wrap_err("failed to get `dora_drop_operator`")?, + on_event: library + .get(b"dora_on_event") + .wrap_err("failed to get `dora_on_event`")?, + } + }; + Ok(bindings) + } +} diff --git a/docs/src/latency.png b/docs/src/latency.png new file mode 100644 index 0000000000000000000000000000000000000000..eb925f6f23612954375eeea0f3eec5fc23d0c5dd --- /dev/null +++ b/docs/src/latency.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f8936a521a92b788a8c849e43ab222d214a2f74c137fd7a821f4c42baba340b +size 212915 diff --git a/docs/src/logo.svg b/docs/src/logo.svg new file mode 100644 index 
0000000000000000000000000000000000000000..48898fc17954f49247a1abbb5e9d630a500f2fa8 --- /dev/null +++ b/docs/src/logo.svg @@ -0,0 +1,3 @@ + + + diff --git a/dora-robomaster/.gitattributes b/dora-robomaster/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..4b45de3115f471f9dcf5a7130757a284b3500fc6 --- /dev/null +++ b/dora-robomaster/.gitattributes @@ -0,0 +1,5 @@ +*.arrow filter=lfs diff=lfs merge=lfs -text +*.mkv filter=lfs diff=lfs merge=lfs -text +*.mp4 filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +graphs/out/**/*.txt filter=lfs diff=lfs merge=lfs -text diff --git a/dora-robomaster/.gitignore b/dora-robomaster/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b835d9c44bf3f32761fc45e6b0804e40cfde882a --- /dev/null +++ b/dora-robomaster/.gitignore @@ -0,0 +1,6 @@ +graphs/yolov5n.pt +*.pt +operators/__pycache__/ +__pycache__/ +*.avi +*.txt \ No newline at end of file diff --git a/dora-robomaster/README.md b/dora-robomaster/README.md new file mode 100644 index 0000000000000000000000000000000000000000..19f8fdc38f418ff775085c76c1f97fa393193a5c --- /dev/null +++ b/dora-robomaster/README.md @@ -0,0 +1,126 @@ +--- +configs: + - config_name: image + data_files: + - split: train + path: graphs/out/*/image.parquet + - config_name: mistral + data_files: + - split: train + path: graphs/out/*/mistral_output_file.parquet + - config_name: chatgpt + data_files: + - split: train + path: graphs/out/*/chatgpt_output_file.parquet + - config_name: raw_file + data_files: + - split: train + path: graphs/out/*/raw_file.parquet + - config_name: saved_file + data_files: + - split: train + path: graphs/out/*/saved_file.parquet + - config_name: audio + data_files: + - split: train + path: graphs/out/*/audio.parquet + - config_name: whisper_text + data_files: + - split: train + path: graphs/out/*/whisper_text.parquet + - config_name: control + data_files: + - split: train + path: graphs/out/*/control.parquet + - config_name: gimbal_control + data_files: + - split: train + path: graphs/out/*/gimbal_control.parquet + - config_name: logs + data_files: + - split: train + path: graphs/out/*.txt +license: apache-2.0 +language: + - en +tags: + - dora + - robotic +--- + +# Dora-Robomaster + +This project aims to use Dora to enhance the capabilities of a RoboMaster S1. + +You can see a quick demo here: + +[![Demo](http://img.youtube.com/vi/NvvTEP8Jak8/0.jpg)](http://www.youtube.com/watch?v=NvvTEP8Jak8) + +### Getting Started + +command to start the demo: + +```bash +alias dora='dora-cli' +dora up +dora start graphs/dataflow.yml --attach +``` + +start the reaction lighting test: +`dora start graphs/reaction.yml --attach` + +## Installation of the Robomaster S1 Hack + +This guide is an updated version of the original [Robomaster S1 SDK Hack Guide](https://www.bug-br.org.br/s1_sdk_hack.zip) and is intended for use on a Windows 11 system. + +### Prerequisites + +Before you get started, you'll need the following: + +- Robomaster S1 (do not update it to the latest version, as it may block the hack). +- [Robomaster App](https://www.dji.com/fr/robomaster-s1/downloads). +- [Android SDK Platform-Tools](https://developer.android.com/tools/releases/platform-tools). Simply unzip it and keep the path handy. +- A micro USB cable. If this guide doesn't work, there might be an issue with the cable, and you may need to replace it with one that supports data transfer. + +### Instructions + +1. 
Start the Robomaster App and connect the Robomaster S1 using one of the two options provided (via router or via Wi-Fi). +2. While connected, use a micro USB cable to connect the robot to the computer's USB port. You should hear a beep sound, similar to when you connect any device. (Please note that no other Android device should be connected via USB during this process). +3. In the Lab section of the app, create a new Python application and paste the following code: + + ```python + def root_me(module): + __import__ = rm_define.__dict__['__builtins__']['__import__'] + return __import__(module, globals(), locals(), [], 0) + + builtins = root_me('builtins') + subprocess = root_me('subprocess') + proc = subprocess.Popen('/system/bin/adb_en.sh', shell=True, executable='/system/bin/sh', stdout=subprocess.PIPE, stderr=subprocess.PIPE) + ``` + +4. Run the code; there should be no errors, and the console should display **Execution Complete** +5. Without closing the app, navigate to the folder containing the Android SDK Platform-Tools and open a terminal inside it. +6. Run the ADP command `.\adb.exe devices `. If everything is working correctly, you should see output similar to this: ![image](https://github.com/Felixhuangsiling/Dora-Robomaster/assets/77993249/dc6368ec-052c-4b18-8fdc-0ec314adb073) +7. Execute the upload.sh script located in the folder `s1_SDK`. +8. Once everything has been executed, restart the S1 by turning it off and then back on. While it's booting up, you should hear two chimes instead of the usual single chime, indicating that the hack has been successful. + +## HuggingFace Dataset + +To set up this repo as a dataset repository: + +```bash +git lfs install +git clone https://huggingface.co/datasets/haixuantao/dora-robomaster + +# if you want to clone without large files – just their pointers +# prepend your git clone with the following env var: +GIT_LFS_SKIP_SMUDGE=1 +``` + +To use the dataset: + +```python +from datasets import load_dataset + +dataset = load_dataset("haixuantao/dora-robomaster") +``` diff --git a/dora-robomaster/graphs/dataflow.yml b/dora-robomaster/graphs/dataflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..abd9c26343c7d1ab9e922a94e5c1ca851ee44cfe --- /dev/null +++ b/dora-robomaster/graphs/dataflow.yml @@ -0,0 +1,169 @@ +nodes: + - id: robot + operator: + # python: /home/peiji/anaconda3/envs/dora38/bin/python + # args: ../operators/robot.py + python: ../operators/robot.py + inputs: + blaster: + source: llm/blaster + queue_size: 1 + led: + source: llm/led + queue_size: 1 + control: + source: planning/control + queue_size: 1 + gimbal_control: + source: planning/gimbal_control + queue_size: 1 + tick: + source: dora/timer/millis/100 + queue_size: 1 + outputs: + - position + + - id: bot_webcam + custom: + source: ../operators/opencv_stream.py + outputs: + - image + + - id: object_detection + operator: + python: ../operators/object_detection.py + inputs: + image: bot_webcam/image + outputs: + - bbox + + ### Second Camera + - id: webcam + operator: + python: ../operators/webcam.py + inputs: + tick: + source: dora/timer/millis/50 + queue_size: 1 + outputs: + - image + + - id: plot_webcam + operator: + python: ../operators/plot.py + inputs: + image: webcam/image + text: whisper/text + + + - id: plot_bot + operator: + python: ../operators/plot.py + inputs: + image: bot_webcam/image + keyboard_buffer: keyboard/buffer + user_message: keyboard/submitted + assistant_message: llm/assistant_message + bbox: object_detection/bbox + + + - id: 
planning + operator: + python: ../operators/planning_op.py + inputs: + position: robot/position + bbox: object_detection/bbox + tick: dora/timer/millis/100 + outputs: + - control + - gimbal_control + - led + - blaster + + ## Speech to text + - id: keyboard + custom: + source: ../operators/keyboard_op.py + outputs: + - buffer + - submitted + - record + - ask + - send + - change + inputs: + recording: whisper/text + + - id: microphone + operator: + python: ../operators/microphone_op.py + inputs: + record: keyboard/record + outputs: + - audio + + - id: whisper + operator: + python: ../operators/whisper_op.py + inputs: + audio: microphone/audio + outputs: + - text + + # ## Code Modifier + # - id: vectordb + # operator: + # python: ../operators/sentence_transformers_op.py + # inputs: + # query: keyboard/change + # saved_file: file_saver/saved_file + # outputs: + # - raw_file + + - id: llm + operator: + python: ../operators/llm_op.py + inputs: + # code_modifier: vectordb/raw_file + assistant: keyboard/ask + message_sender: keyboard/send + outputs: + - modified_file + - assistant_message + - line + - control + - led + - blaster + - rotation + + - id: file_saver + operator: + python: ../operators/file_saver_op.py + inputs: + llm_output_file: llm/modified_file + outputs: + - saved_file + + - id: dora-record + custom: + source: dora-record + inputs: + llm_output_file: llm/modified_file + # raw_file: vectordb/raw_file + saved_file: file_saver/saved_file + audio: microphone/audio + whisper_text: whisper/text + bbox: object_detection/bbox + image: cv2_encoder/encoded_image + position: robot/position + control: planning/control + gimbal_control: planning/gimbal_control + + ### Second Camera + - id: cv2_encoder + operator: + python: ../operators/cv2_encoder_op.py + inputs: + image: bot_webcam/image + outputs: + - encoded_image diff --git a/dora-robomaster/graphs/merge.py b/dora-robomaster/graphs/merge.py new file mode 100644 index 0000000000000000000000000000000000000000..59dc964ae9665abba89065be451ac87ffb25782c --- /dev/null +++ b/dora-robomaster/graphs/merge.py @@ -0,0 +1,30 @@ +import pyarrow as pa + +with pa.memory_map( + "graphs/out/57926662-ef6c-4d33-b456-aa79b8c0fc60/whisper_text.arrow", "r" +) as source: + df_w = pa.RecordBatchStreamReader(source).read_all() + +with pa.memory_map( + "graphs/out/57926662-ef6c-4d33-b456-aa79b8c0fc60/saved_file.arrow", "r" +) as source: + df_i = pa.RecordBatchStreamReader(source).read_all() + + +df_w = df_w.to_pandas() +df_i = df_i.to_pandas() + +df_i["origin"] = df_i["saved_file"].map(lambda x: x[0]["origin"]) +df_w["whisper_text"] = df_w["whisper_text"].map(lambda x: x[0]) + +df = df_i.merge(df_w, on="trace_id") + +print(df) + +print(df.columns) + + +print(df.groupby(by=["trace_id", "origin"]).count()) + + +print(df.groupby(by=["whisper_text", "origin"]).count()) diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_bot_webcam.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_bot_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..1d0d7bcb5e5a0f03b474e4f8267e6b89480acca1 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_bot_webcam.txt @@ -0,0 +1,14 @@ +Traceback (most recent call last): + File "/home/peiji/dora/dora-robomaster/operators/opencv_stream.py", line 5, in + node = Node() +RuntimeError: failed to init event stream + +Caused by: + subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens 
when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_cv2_encoder.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_cv2_encoder.txt new file mode 100644 index 0000000000000000000000000000000000000000..80795cce2243c2b2c5824a9e2e1ef73a5da53b7d --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_cv2_encoder.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_dora-record.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_dora-record.txt new file mode 100644 index 0000000000000000000000000000000000000000..0e72545cef3e8e5601cef520213c490a60b4d010 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_dora-record.txt @@ -0,0 +1,11 @@ +Error: failed to init event stream + +Caused by: + subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. + +Location: + /home/peiji/.cargo/registry/src/index.crates.io-6f17d22bba15001f/dora-node-api-0.3.4/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_file_saver.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_file_saver.txt new file mode 100644 index 0000000000000000000000000000000000000000..80795cce2243c2b2c5824a9e2e1ef73a5da53b7d --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_file_saver.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. 
+ +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_keyboard.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_keyboard.txt new file mode 100644 index 0000000000000000000000000000000000000000..b15d9a82e910b59ef104906dee2a08f0de17b5c0 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_keyboard.txt @@ -0,0 +1,14 @@ +Traceback (most recent call last): + File "/home/peiji/dora/dora-robomaster/operators/keyboard_op.py", line 7, in + node = Node() +RuntimeError: failed to init event stream + +Caused by: + subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_llm.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_llm.txt new file mode 100644 index 0000000000000000000000000000000000000000..daedc640d8ecccda7f3ab16445ec4b0c5ba2790c --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_llm.txt @@ -0,0 +1,25 @@ +CUDA extension not installed. +CUDA extension not installed. +/home/peiji/anaconda3/envs/cu122/lib/python3.10/site-packages/transformers/modeling_utils.py:4371: FutureWarning: `_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. Please use `model.hf_quantizer.is_trainable` instead + warnings.warn( +The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class +The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use the forward method of RoPE from now on instead. 
It is not used in the `LlamaAttention` class +Some weights of the model checkpoint at /home/peiji/deepseek-coder-6.7B-instruct-GPTQ/ were not used when initializing LlamaForCausalLM: ['model.layers.0.mlp.down_proj.bias', 'model.layers.0.mlp.gate_proj.bias', 'model.layers.0.mlp.up_proj.bias', 'model.layers.0.self_attn.k_proj.bias', 'model.layers.0.self_attn.o_proj.bias', 'model.layers.0.self_attn.q_proj.bias', 'model.layers.0.self_attn.v_proj.bias', 'model.layers.1.mlp.down_proj.bias', 'model.layers.1.mlp.gate_proj.bias', 'model.layers.1.mlp.up_proj.bias', 'model.layers.1.self_attn.k_proj.bias', 'model.layers.1.self_attn.o_proj.bias', 'model.layers.1.self_attn.q_proj.bias', 'model.layers.1.self_attn.v_proj.bias', 'model.layers.10.mlp.down_proj.bias', 'model.layers.10.mlp.gate_proj.bias', 'model.layers.10.mlp.up_proj.bias', 'model.layers.10.self_attn.k_proj.bias', 'model.layers.10.self_attn.o_proj.bias', 'model.layers.10.self_attn.q_proj.bias', 'model.layers.10.self_attn.v_proj.bias', 'model.layers.11.mlp.down_proj.bias', 'model.layers.11.mlp.gate_proj.bias', 'model.layers.11.mlp.up_proj.bias', 'model.layers.11.self_attn.k_proj.bias', 'model.layers.11.self_attn.o_proj.bias', 'model.layers.11.self_attn.q_proj.bias', 'model.layers.11.self_attn.v_proj.bias', 'model.layers.12.mlp.down_proj.bias', 'model.layers.12.mlp.gate_proj.bias', 'model.layers.12.mlp.up_proj.bias', 'model.layers.12.self_attn.k_proj.bias', 'model.layers.12.self_attn.o_proj.bias', 'model.layers.12.self_attn.q_proj.bias', 'model.layers.12.self_attn.v_proj.bias', 'model.layers.13.mlp.down_proj.bias', 'model.layers.13.mlp.gate_proj.bias', 'model.layers.13.mlp.up_proj.bias', 'model.layers.13.self_attn.k_proj.bias', 'model.layers.13.self_attn.o_proj.bias', 'model.layers.13.self_attn.q_proj.bias', 'model.layers.13.self_attn.v_proj.bias', 'model.layers.14.mlp.down_proj.bias', 'model.layers.14.mlp.gate_proj.bias', 'model.layers.14.mlp.up_proj.bias', 'model.layers.14.self_attn.k_proj.bias', 'model.layers.14.self_attn.o_proj.bias', 'model.layers.14.self_attn.q_proj.bias', 'model.layers.14.self_attn.v_proj.bias', 'model.layers.15.mlp.down_proj.bias', 'model.layers.15.mlp.gate_proj.bias', 'model.layers.15.mlp.up_proj.bias', 'model.layers.15.self_attn.k_proj.bias', 'model.layers.15.self_attn.o_proj.bias', 'model.layers.15.self_attn.q_proj.bias', 'model.layers.15.self_attn.v_proj.bias', 'model.layers.16.mlp.down_proj.bias', 'model.layers.16.mlp.gate_proj.bias', 'model.layers.16.mlp.up_proj.bias', 'model.layers.16.self_attn.k_proj.bias', 'model.layers.16.self_attn.o_proj.bias', 'model.layers.16.self_attn.q_proj.bias', 'model.layers.16.self_attn.v_proj.bias', 'model.layers.17.mlp.down_proj.bias', 'model.layers.17.mlp.gate_proj.bias', 'model.layers.17.mlp.up_proj.bias', 'model.layers.17.self_attn.k_proj.bias', 'model.layers.17.self_attn.o_proj.bias', 'model.layers.17.self_attn.q_proj.bias', 'model.layers.17.self_attn.v_proj.bias', 'model.layers.18.mlp.down_proj.bias', 'model.layers.18.mlp.gate_proj.bias', 'model.layers.18.mlp.up_proj.bias', 'model.layers.18.self_attn.k_proj.bias', 'model.layers.18.self_attn.o_proj.bias', 'model.layers.18.self_attn.q_proj.bias', 'model.layers.18.self_attn.v_proj.bias', 'model.layers.19.mlp.down_proj.bias', 'model.layers.19.mlp.gate_proj.bias', 'model.layers.19.mlp.up_proj.bias', 'model.layers.19.self_attn.k_proj.bias', 'model.layers.19.self_attn.o_proj.bias', 'model.layers.19.self_attn.q_proj.bias', 'model.layers.19.self_attn.v_proj.bias', 'model.layers.2.mlp.down_proj.bias', 
'model.layers.2.mlp.gate_proj.bias', 'model.layers.2.mlp.up_proj.bias', 'model.layers.2.self_attn.k_proj.bias', 'model.layers.2.self_attn.o_proj.bias', 'model.layers.2.self_attn.q_proj.bias', 'model.layers.2.self_attn.v_proj.bias', 'model.layers.20.mlp.down_proj.bias', 'model.layers.20.mlp.gate_proj.bias', 'model.layers.20.mlp.up_proj.bias', 'model.layers.20.self_attn.k_proj.bias', 'model.layers.20.self_attn.o_proj.bias', 'model.layers.20.self_attn.q_proj.bias', 'model.layers.20.self_attn.v_proj.bias', 'model.layers.21.mlp.down_proj.bias', 'model.layers.21.mlp.gate_proj.bias', 'model.layers.21.mlp.up_proj.bias', 'model.layers.21.self_attn.k_proj.bias', 'model.layers.21.self_attn.o_proj.bias', 'model.layers.21.self_attn.q_proj.bias', 'model.layers.21.self_attn.v_proj.bias', 'model.layers.22.mlp.down_proj.bias', 'model.layers.22.mlp.gate_proj.bias', 'model.layers.22.mlp.up_proj.bias', 'model.layers.22.self_attn.k_proj.bias', 'model.layers.22.self_attn.o_proj.bias', 'model.layers.22.self_attn.q_proj.bias', 'model.layers.22.self_attn.v_proj.bias', 'model.layers.23.mlp.down_proj.bias', 'model.layers.23.mlp.gate_proj.bias', 'model.layers.23.mlp.up_proj.bias', 'model.layers.23.self_attn.k_proj.bias', 'model.layers.23.self_attn.o_proj.bias', 'model.layers.23.self_attn.q_proj.bias', 'model.layers.23.self_attn.v_proj.bias', 'model.layers.24.mlp.down_proj.bias', 'model.layers.24.mlp.gate_proj.bias', 'model.layers.24.mlp.up_proj.bias', 'model.layers.24.self_attn.k_proj.bias', 'model.layers.24.self_attn.o_proj.bias', 'model.layers.24.self_attn.q_proj.bias', 'model.layers.24.self_attn.v_proj.bias', 'model.layers.25.mlp.down_proj.bias', 'model.layers.25.mlp.gate_proj.bias', 'model.layers.25.mlp.up_proj.bias', 'model.layers.25.self_attn.k_proj.bias', 'model.layers.25.self_attn.o_proj.bias', 'model.layers.25.self_attn.q_proj.bias', 'model.layers.25.self_attn.v_proj.bias', 'model.layers.26.mlp.down_proj.bias', 'model.layers.26.mlp.gate_proj.bias', 'model.layers.26.mlp.up_proj.bias', 'model.layers.26.self_attn.k_proj.bias', 'model.layers.26.self_attn.o_proj.bias', 'model.layers.26.self_attn.q_proj.bias', 'model.layers.26.self_attn.v_proj.bias', 'model.layers.27.mlp.down_proj.bias', 'model.layers.27.mlp.gate_proj.bias', 'model.layers.27.mlp.up_proj.bias', 'model.layers.27.self_attn.k_proj.bias', 'model.layers.27.self_attn.o_proj.bias', 'model.layers.27.self_attn.q_proj.bias', 'model.layers.27.self_attn.v_proj.bias', 'model.layers.28.mlp.down_proj.bias', 'model.layers.28.mlp.gate_proj.bias', 'model.layers.28.mlp.up_proj.bias', 'model.layers.28.self_attn.k_proj.bias', 'model.layers.28.self_attn.o_proj.bias', 'model.layers.28.self_attn.q_proj.bias', 'model.layers.28.self_attn.v_proj.bias', 'model.layers.29.mlp.down_proj.bias', 'model.layers.29.mlp.gate_proj.bias', 'model.layers.29.mlp.up_proj.bias', 'model.layers.29.self_attn.k_proj.bias', 'model.layers.29.self_attn.o_proj.bias', 'model.layers.29.self_attn.q_proj.bias', 'model.layers.29.self_attn.v_proj.bias', 'model.layers.3.mlp.down_proj.bias', 'model.layers.3.mlp.gate_proj.bias', 'model.layers.3.mlp.up_proj.bias', 'model.layers.3.self_attn.k_proj.bias', 'model.layers.3.self_attn.o_proj.bias', 'model.layers.3.self_attn.q_proj.bias', 'model.layers.3.self_attn.v_proj.bias', 'model.layers.30.mlp.down_proj.bias', 'model.layers.30.mlp.gate_proj.bias', 'model.layers.30.mlp.up_proj.bias', 'model.layers.30.self_attn.k_proj.bias', 'model.layers.30.self_attn.o_proj.bias', 'model.layers.30.self_attn.q_proj.bias', 'model.layers.30.self_attn.v_proj.bias', 
'model.layers.31.mlp.down_proj.bias', 'model.layers.31.mlp.gate_proj.bias', 'model.layers.31.mlp.up_proj.bias', 'model.layers.31.self_attn.k_proj.bias', 'model.layers.31.self_attn.o_proj.bias', 'model.layers.31.self_attn.q_proj.bias', 'model.layers.31.self_attn.v_proj.bias', 'model.layers.4.mlp.down_proj.bias', 'model.layers.4.mlp.gate_proj.bias', 'model.layers.4.mlp.up_proj.bias', 'model.layers.4.self_attn.k_proj.bias', 'model.layers.4.self_attn.o_proj.bias', 'model.layers.4.self_attn.q_proj.bias', 'model.layers.4.self_attn.v_proj.bias', 'model.layers.5.mlp.down_proj.bias', 'model.layers.5.mlp.gate_proj.bias', 'model.layers.5.mlp.up_proj.bias', 'model.layers.5.self_attn.k_proj.bias', 'model.layers.5.self_attn.o_proj.bias', 'model.layers.5.self_attn.q_proj.bias', 'model.layers.5.self_attn.v_proj.bias', 'model.layers.6.mlp.down_proj.bias', 'model.layers.6.mlp.gate_proj.bias', 'model.layers.6.mlp.up_proj.bias', 'model.layers.6.self_attn.k_proj.bias', 'model.layers.6.self_attn.o_proj.bias', 'model.layers.6.self_attn.q_proj.bias', 'model.layers.6.self_attn.v_proj.bias', 'model.layers.7.mlp.down_proj.bias', 'model.layers.7.mlp.gate_proj.bias', 'model.layers.7.mlp.up_proj.bias', 'model.layers.7.self_attn.k_proj.bias', 'model.layers.7.self_attn.o_proj.bias', 'model.layers.7.self_attn.q_proj.bias', 'model.layers.7.self_attn.v_proj.bias', 'model.layers.8.mlp.down_proj.bias', 'model.layers.8.mlp.gate_proj.bias', 'model.layers.8.mlp.up_proj.bias', 'model.layers.8.self_attn.k_proj.bias', 'model.layers.8.self_attn.o_proj.bias', 'model.layers.8.self_attn.q_proj.bias', 'model.layers.8.self_attn.v_proj.bias', 'model.layers.9.mlp.down_proj.bias', 'model.layers.9.mlp.gate_proj.bias', 'model.layers.9.mlp.up_proj.bias', 'model.layers.9.self_attn.k_proj.bias', 'model.layers.9.self_attn.o_proj.bias', 'model.layers.9.self_attn.q_proj.bias', 'model.layers.9.self_attn.v_proj.bias'] +- This IS expected if you are initializing LlamaForCausalLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). +- This IS NOT expected if you are initializing LlamaForCausalLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_microphone.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_microphone.txt new file mode 100644 index 0000000000000000000000000000000000000000..80795cce2243c2b2c5824a9e2e1ef73a5da53b7d --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_microphone.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. 
+ +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_object_detection.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_object_detection.txt new file mode 100644 index 0000000000000000000000000000000000000000..80795cce2243c2b2c5824a9e2e1ef73a5da53b7d --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_object_detection.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_planning.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_planning.txt new file mode 100644 index 0000000000000000000000000000000000000000..80795cce2243c2b2c5824a9e2e1ef73a5da53b7d --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_planning.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_plot_bot.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_plot_bot.txt new file mode 100644 index 0000000000000000000000000000000000000000..80795cce2243c2b2c5824a9e2e1ef73a5da53b7d --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_plot_bot.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. 
+ +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_plot_webcam.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_plot_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..80795cce2243c2b2c5824a9e2e1ef73a5da53b7d --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_plot_webcam.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_robot.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_robot.txt new file mode 100644 index 0000000000000000000000000000000000000000..922a8de6d6568e017e52f621dd5a0ce68d90b693 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_robot.txt @@ -0,0 +1,12 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init an operator + 2: failed to init python operator + 3: ModuleNotFoundError: No module named 'python3' + +Location: + binaries/runtime/src/operator/python.rs:30:9 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_webcam.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..ebcc34c2e74c7ad93d2b45d10ed607de4729087c --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_webcam.txt @@ -0,0 +1,17 @@ +[ WARN:0@0.059] global cap_v4l.cpp:999 open VIDEOIO(V4L2:/dev/video2): can't open camera by index +[ERROR:0@0.059] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_whisper.txt b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_whisper.txt new file mode 100644 index 0000000000000000000000000000000000000000..80795cce2243c2b2c5824a9e2e1ef73a5da53b7d --- /dev/null +++ b/dora-robomaster/graphs/out/01902f27-fd62-79e3-8089-5f3a2020a226/log_whisper.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. 
+ +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f27-fd62-79e3-8089-5f3a2020a226 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_bot_webcam.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_bot_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..2d0175f8c95d8bf1d3ed4039cbb626a4f92053e4 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_bot_webcam.txt @@ -0,0 +1,14 @@ +Traceback (most recent call last): + File "/home/peiji/dora/dora-robomaster/operators/opencv_stream.py", line 5, in + node = Node() +RuntimeError: failed to init event stream + +Caused by: + subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_cv2_encoder.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_cv2_encoder.txt new file mode 100644 index 0000000000000000000000000000000000000000..06648379dff13490e9a451593148dec6a6ea5e45 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_cv2_encoder.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_dora-record.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_dora-record.txt new file mode 100644 index 0000000000000000000000000000000000000000..5a3cb59acfb276111cdc60988b7db50d412f95fc --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_dora-record.txt @@ -0,0 +1,11 @@ +Error: failed to init event stream + +Caused by: + subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. 
+ +Location: + /home/peiji/.cargo/registry/src/index.crates.io-6f17d22bba15001f/dora-node-api-0.3.4/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_file_saver.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_file_saver.txt new file mode 100644 index 0000000000000000000000000000000000000000..06648379dff13490e9a451593148dec6a6ea5e45 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_file_saver.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_keyboard.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_keyboard.txt new file mode 100644 index 0000000000000000000000000000000000000000..8fcf810b6def9edd90181d95a56cdc66b2161690 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_keyboard.txt @@ -0,0 +1,14 @@ +Traceback (most recent call last): + File "/home/peiji/dora/dora-robomaster/operators/keyboard_op.py", line 7, in + node = Node() +RuntimeError: failed to init event stream + +Caused by: + subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_llm.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_llm.txt new file mode 100644 index 0000000000000000000000000000000000000000..53473c139e647ca923efd8985e1f88fdaad5e7fe --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_llm.txt @@ -0,0 +1,25 @@ +CUDA extension not installed. +CUDA extension not installed. +/home/peiji/anaconda3/envs/cu122/lib/python3.10/site-packages/transformers/modeling_utils.py:4371: FutureWarning: `_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. Please use `model.hf_quantizer.is_trainable` instead + warnings.warn( +The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class +The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use the forward method of RoPE from now on instead. 
It is not used in the `LlamaAttention` class +Some weights of the model checkpoint at /home/peiji/deepseek-coder-6.7B-instruct-GPTQ/ were not used when initializing LlamaForCausalLM: ['model.layers.0.mlp.down_proj.bias', 'model.layers.0.mlp.gate_proj.bias', 'model.layers.0.mlp.up_proj.bias', 'model.layers.0.self_attn.k_proj.bias', 'model.layers.0.self_attn.o_proj.bias', 'model.layers.0.self_attn.q_proj.bias', 'model.layers.0.self_attn.v_proj.bias', 'model.layers.1.mlp.down_proj.bias', 'model.layers.1.mlp.gate_proj.bias', 'model.layers.1.mlp.up_proj.bias', 'model.layers.1.self_attn.k_proj.bias', 'model.layers.1.self_attn.o_proj.bias', 'model.layers.1.self_attn.q_proj.bias', 'model.layers.1.self_attn.v_proj.bias', 'model.layers.10.mlp.down_proj.bias', 'model.layers.10.mlp.gate_proj.bias', 'model.layers.10.mlp.up_proj.bias', 'model.layers.10.self_attn.k_proj.bias', 'model.layers.10.self_attn.o_proj.bias', 'model.layers.10.self_attn.q_proj.bias', 'model.layers.10.self_attn.v_proj.bias', 'model.layers.11.mlp.down_proj.bias', 'model.layers.11.mlp.gate_proj.bias', 'model.layers.11.mlp.up_proj.bias', 'model.layers.11.self_attn.k_proj.bias', 'model.layers.11.self_attn.o_proj.bias', 'model.layers.11.self_attn.q_proj.bias', 'model.layers.11.self_attn.v_proj.bias', 'model.layers.12.mlp.down_proj.bias', 'model.layers.12.mlp.gate_proj.bias', 'model.layers.12.mlp.up_proj.bias', 'model.layers.12.self_attn.k_proj.bias', 'model.layers.12.self_attn.o_proj.bias', 'model.layers.12.self_attn.q_proj.bias', 'model.layers.12.self_attn.v_proj.bias', 'model.layers.13.mlp.down_proj.bias', 'model.layers.13.mlp.gate_proj.bias', 'model.layers.13.mlp.up_proj.bias', 'model.layers.13.self_attn.k_proj.bias', 'model.layers.13.self_attn.o_proj.bias', 'model.layers.13.self_attn.q_proj.bias', 'model.layers.13.self_attn.v_proj.bias', 'model.layers.14.mlp.down_proj.bias', 'model.layers.14.mlp.gate_proj.bias', 'model.layers.14.mlp.up_proj.bias', 'model.layers.14.self_attn.k_proj.bias', 'model.layers.14.self_attn.o_proj.bias', 'model.layers.14.self_attn.q_proj.bias', 'model.layers.14.self_attn.v_proj.bias', 'model.layers.15.mlp.down_proj.bias', 'model.layers.15.mlp.gate_proj.bias', 'model.layers.15.mlp.up_proj.bias', 'model.layers.15.self_attn.k_proj.bias', 'model.layers.15.self_attn.o_proj.bias', 'model.layers.15.self_attn.q_proj.bias', 'model.layers.15.self_attn.v_proj.bias', 'model.layers.16.mlp.down_proj.bias', 'model.layers.16.mlp.gate_proj.bias', 'model.layers.16.mlp.up_proj.bias', 'model.layers.16.self_attn.k_proj.bias', 'model.layers.16.self_attn.o_proj.bias', 'model.layers.16.self_attn.q_proj.bias', 'model.layers.16.self_attn.v_proj.bias', 'model.layers.17.mlp.down_proj.bias', 'model.layers.17.mlp.gate_proj.bias', 'model.layers.17.mlp.up_proj.bias', 'model.layers.17.self_attn.k_proj.bias', 'model.layers.17.self_attn.o_proj.bias', 'model.layers.17.self_attn.q_proj.bias', 'model.layers.17.self_attn.v_proj.bias', 'model.layers.18.mlp.down_proj.bias', 'model.layers.18.mlp.gate_proj.bias', 'model.layers.18.mlp.up_proj.bias', 'model.layers.18.self_attn.k_proj.bias', 'model.layers.18.self_attn.o_proj.bias', 'model.layers.18.self_attn.q_proj.bias', 'model.layers.18.self_attn.v_proj.bias', 'model.layers.19.mlp.down_proj.bias', 'model.layers.19.mlp.gate_proj.bias', 'model.layers.19.mlp.up_proj.bias', 'model.layers.19.self_attn.k_proj.bias', 'model.layers.19.self_attn.o_proj.bias', 'model.layers.19.self_attn.q_proj.bias', 'model.layers.19.self_attn.v_proj.bias', 'model.layers.2.mlp.down_proj.bias', 
'model.layers.2.mlp.gate_proj.bias', 'model.layers.2.mlp.up_proj.bias', 'model.layers.2.self_attn.k_proj.bias', 'model.layers.2.self_attn.o_proj.bias', 'model.layers.2.self_attn.q_proj.bias', 'model.layers.2.self_attn.v_proj.bias', 'model.layers.20.mlp.down_proj.bias', 'model.layers.20.mlp.gate_proj.bias', 'model.layers.20.mlp.up_proj.bias', 'model.layers.20.self_attn.k_proj.bias', 'model.layers.20.self_attn.o_proj.bias', 'model.layers.20.self_attn.q_proj.bias', 'model.layers.20.self_attn.v_proj.bias', 'model.layers.21.mlp.down_proj.bias', 'model.layers.21.mlp.gate_proj.bias', 'model.layers.21.mlp.up_proj.bias', 'model.layers.21.self_attn.k_proj.bias', 'model.layers.21.self_attn.o_proj.bias', 'model.layers.21.self_attn.q_proj.bias', 'model.layers.21.self_attn.v_proj.bias', 'model.layers.22.mlp.down_proj.bias', 'model.layers.22.mlp.gate_proj.bias', 'model.layers.22.mlp.up_proj.bias', 'model.layers.22.self_attn.k_proj.bias', 'model.layers.22.self_attn.o_proj.bias', 'model.layers.22.self_attn.q_proj.bias', 'model.layers.22.self_attn.v_proj.bias', 'model.layers.23.mlp.down_proj.bias', 'model.layers.23.mlp.gate_proj.bias', 'model.layers.23.mlp.up_proj.bias', 'model.layers.23.self_attn.k_proj.bias', 'model.layers.23.self_attn.o_proj.bias', 'model.layers.23.self_attn.q_proj.bias', 'model.layers.23.self_attn.v_proj.bias', 'model.layers.24.mlp.down_proj.bias', 'model.layers.24.mlp.gate_proj.bias', 'model.layers.24.mlp.up_proj.bias', 'model.layers.24.self_attn.k_proj.bias', 'model.layers.24.self_attn.o_proj.bias', 'model.layers.24.self_attn.q_proj.bias', 'model.layers.24.self_attn.v_proj.bias', 'model.layers.25.mlp.down_proj.bias', 'model.layers.25.mlp.gate_proj.bias', 'model.layers.25.mlp.up_proj.bias', 'model.layers.25.self_attn.k_proj.bias', 'model.layers.25.self_attn.o_proj.bias', 'model.layers.25.self_attn.q_proj.bias', 'model.layers.25.self_attn.v_proj.bias', 'model.layers.26.mlp.down_proj.bias', 'model.layers.26.mlp.gate_proj.bias', 'model.layers.26.mlp.up_proj.bias', 'model.layers.26.self_attn.k_proj.bias', 'model.layers.26.self_attn.o_proj.bias', 'model.layers.26.self_attn.q_proj.bias', 'model.layers.26.self_attn.v_proj.bias', 'model.layers.27.mlp.down_proj.bias', 'model.layers.27.mlp.gate_proj.bias', 'model.layers.27.mlp.up_proj.bias', 'model.layers.27.self_attn.k_proj.bias', 'model.layers.27.self_attn.o_proj.bias', 'model.layers.27.self_attn.q_proj.bias', 'model.layers.27.self_attn.v_proj.bias', 'model.layers.28.mlp.down_proj.bias', 'model.layers.28.mlp.gate_proj.bias', 'model.layers.28.mlp.up_proj.bias', 'model.layers.28.self_attn.k_proj.bias', 'model.layers.28.self_attn.o_proj.bias', 'model.layers.28.self_attn.q_proj.bias', 'model.layers.28.self_attn.v_proj.bias', 'model.layers.29.mlp.down_proj.bias', 'model.layers.29.mlp.gate_proj.bias', 'model.layers.29.mlp.up_proj.bias', 'model.layers.29.self_attn.k_proj.bias', 'model.layers.29.self_attn.o_proj.bias', 'model.layers.29.self_attn.q_proj.bias', 'model.layers.29.self_attn.v_proj.bias', 'model.layers.3.mlp.down_proj.bias', 'model.layers.3.mlp.gate_proj.bias', 'model.layers.3.mlp.up_proj.bias', 'model.layers.3.self_attn.k_proj.bias', 'model.layers.3.self_attn.o_proj.bias', 'model.layers.3.self_attn.q_proj.bias', 'model.layers.3.self_attn.v_proj.bias', 'model.layers.30.mlp.down_proj.bias', 'model.layers.30.mlp.gate_proj.bias', 'model.layers.30.mlp.up_proj.bias', 'model.layers.30.self_attn.k_proj.bias', 'model.layers.30.self_attn.o_proj.bias', 'model.layers.30.self_attn.q_proj.bias', 'model.layers.30.self_attn.v_proj.bias', 
'model.layers.31.mlp.down_proj.bias', 'model.layers.31.mlp.gate_proj.bias', 'model.layers.31.mlp.up_proj.bias', 'model.layers.31.self_attn.k_proj.bias', 'model.layers.31.self_attn.o_proj.bias', 'model.layers.31.self_attn.q_proj.bias', 'model.layers.31.self_attn.v_proj.bias', 'model.layers.4.mlp.down_proj.bias', 'model.layers.4.mlp.gate_proj.bias', 'model.layers.4.mlp.up_proj.bias', 'model.layers.4.self_attn.k_proj.bias', 'model.layers.4.self_attn.o_proj.bias', 'model.layers.4.self_attn.q_proj.bias', 'model.layers.4.self_attn.v_proj.bias', 'model.layers.5.mlp.down_proj.bias', 'model.layers.5.mlp.gate_proj.bias', 'model.layers.5.mlp.up_proj.bias', 'model.layers.5.self_attn.k_proj.bias', 'model.layers.5.self_attn.o_proj.bias', 'model.layers.5.self_attn.q_proj.bias', 'model.layers.5.self_attn.v_proj.bias', 'model.layers.6.mlp.down_proj.bias', 'model.layers.6.mlp.gate_proj.bias', 'model.layers.6.mlp.up_proj.bias', 'model.layers.6.self_attn.k_proj.bias', 'model.layers.6.self_attn.o_proj.bias', 'model.layers.6.self_attn.q_proj.bias', 'model.layers.6.self_attn.v_proj.bias', 'model.layers.7.mlp.down_proj.bias', 'model.layers.7.mlp.gate_proj.bias', 'model.layers.7.mlp.up_proj.bias', 'model.layers.7.self_attn.k_proj.bias', 'model.layers.7.self_attn.o_proj.bias', 'model.layers.7.self_attn.q_proj.bias', 'model.layers.7.self_attn.v_proj.bias', 'model.layers.8.mlp.down_proj.bias', 'model.layers.8.mlp.gate_proj.bias', 'model.layers.8.mlp.up_proj.bias', 'model.layers.8.self_attn.k_proj.bias', 'model.layers.8.self_attn.o_proj.bias', 'model.layers.8.self_attn.q_proj.bias', 'model.layers.8.self_attn.v_proj.bias', 'model.layers.9.mlp.down_proj.bias', 'model.layers.9.mlp.gate_proj.bias', 'model.layers.9.mlp.up_proj.bias', 'model.layers.9.self_attn.k_proj.bias', 'model.layers.9.self_attn.o_proj.bias', 'model.layers.9.self_attn.q_proj.bias', 'model.layers.9.self_attn.v_proj.bias'] +- This IS expected if you are initializing LlamaForCausalLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). +- This IS NOT expected if you are initializing LlamaForCausalLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_microphone.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_microphone.txt new file mode 100644 index 0000000000000000000000000000000000000000..06648379dff13490e9a451593148dec6a6ea5e45 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_microphone.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. 
+ +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_object_detection.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_object_detection.txt new file mode 100644 index 0000000000000000000000000000000000000000..06648379dff13490e9a451593148dec6a6ea5e45 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_object_detection.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_planning.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_planning.txt new file mode 100644 index 0000000000000000000000000000000000000000..06648379dff13490e9a451593148dec6a6ea5e45 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_planning.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_plot_bot.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_plot_bot.txt new file mode 100644 index 0000000000000000000000000000000000000000..06648379dff13490e9a451593148dec6a6ea5e45 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_plot_bot.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. 
+ +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_plot_webcam.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_plot_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..06648379dff13490e9a451593148dec6a6ea5e45 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_plot_webcam.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_robot.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_robot.txt new file mode 100644 index 0000000000000000000000000000000000000000..922a8de6d6568e017e52f621dd5a0ce68d90b693 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_robot.txt @@ -0,0 +1,12 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init an operator + 2: failed to init python operator + 3: ModuleNotFoundError: No module named 'python3' + +Location: + binaries/runtime/src/operator/python.rs:30:9 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_webcam.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..f8a70e57d1b9a5334d38a357b725496b83db14ca --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_webcam.txt @@ -0,0 +1,17 @@ +[ WARN:0@0.263] global cap_v4l.cpp:999 open VIDEOIO(V4L2:/dev/video2): can't open camera by index +[ERROR:0@0.263] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_whisper.txt b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_whisper.txt new file mode 100644 index 0000000000000000000000000000000000000000..06648379dff13490e9a451593148dec6a6ea5e45 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2a-23ce-72f4-875b-c0a8825d125e/log_whisper.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. 
+ +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2a-23ce-72f4-875b-c0a8825d125e robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_bot_webcam.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_bot_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..cea3c23ce9b808fed39c16f71911ea98879a79ea --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_bot_webcam.txt @@ -0,0 +1,14 @@ +Traceback (most recent call last): + File "/home/peiji/dora/dora-robomaster/operators/opencv_stream.py", line 5, in + node = Node() +RuntimeError: failed to init event stream + +Caused by: + subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_cv2_encoder.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_cv2_encoder.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fb3b76f44cb191f1d669aa5f1ecd7aa2ece8176 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_cv2_encoder.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_dora-record.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_dora-record.txt new file mode 100644 index 0000000000000000000000000000000000000000..c2ee05cdfaecf20a4d4b3e46fcb1b2517cacdd41 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_dora-record.txt @@ -0,0 +1,11 @@ +Error: failed to init event stream + +Caused by: + subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. 
+ +Location: + /home/peiji/.cargo/registry/src/index.crates.io-6f17d22bba15001f/dora-node-api-0.3.4/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_file_saver.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_file_saver.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fb3b76f44cb191f1d669aa5f1ecd7aa2ece8176 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_file_saver.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_keyboard.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_keyboard.txt new file mode 100644 index 0000000000000000000000000000000000000000..34817ed43e826ad494a60bab94cd4056afdc178c --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_keyboard.txt @@ -0,0 +1,14 @@ +Traceback (most recent call last): + File "/home/peiji/dora/dora-robomaster/operators/keyboard_op.py", line 7, in + node = Node() +RuntimeError: failed to init event stream + +Caused by: + subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_llm.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_llm.txt new file mode 100644 index 0000000000000000000000000000000000000000..f3446b2dff2007081a531437a460baecc5145926 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_llm.txt @@ -0,0 +1,25 @@ +CUDA extension not installed. +CUDA extension not installed. +/home/peiji/anaconda3/envs/cu122/lib/python3.10/site-packages/transformers/modeling_utils.py:4371: FutureWarning: `_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. Please use `model.hf_quantizer.is_trainable` instead + warnings.warn( +The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class +The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use the forward method of RoPE from now on instead. 
It is not used in the `LlamaAttention` class +Some weights of the model checkpoint at /home/peiji/deepseek-coder-6.7B-instruct-GPTQ/ were not used when initializing LlamaForCausalLM: ['model.layers.0.mlp.down_proj.bias', 'model.layers.0.mlp.gate_proj.bias', 'model.layers.0.mlp.up_proj.bias', 'model.layers.0.self_attn.k_proj.bias', 'model.layers.0.self_attn.o_proj.bias', 'model.layers.0.self_attn.q_proj.bias', 'model.layers.0.self_attn.v_proj.bias', 'model.layers.1.mlp.down_proj.bias', 'model.layers.1.mlp.gate_proj.bias', 'model.layers.1.mlp.up_proj.bias', 'model.layers.1.self_attn.k_proj.bias', 'model.layers.1.self_attn.o_proj.bias', 'model.layers.1.self_attn.q_proj.bias', 'model.layers.1.self_attn.v_proj.bias', 'model.layers.10.mlp.down_proj.bias', 'model.layers.10.mlp.gate_proj.bias', 'model.layers.10.mlp.up_proj.bias', 'model.layers.10.self_attn.k_proj.bias', 'model.layers.10.self_attn.o_proj.bias', 'model.layers.10.self_attn.q_proj.bias', 'model.layers.10.self_attn.v_proj.bias', 'model.layers.11.mlp.down_proj.bias', 'model.layers.11.mlp.gate_proj.bias', 'model.layers.11.mlp.up_proj.bias', 'model.layers.11.self_attn.k_proj.bias', 'model.layers.11.self_attn.o_proj.bias', 'model.layers.11.self_attn.q_proj.bias', 'model.layers.11.self_attn.v_proj.bias', 'model.layers.12.mlp.down_proj.bias', 'model.layers.12.mlp.gate_proj.bias', 'model.layers.12.mlp.up_proj.bias', 'model.layers.12.self_attn.k_proj.bias', 'model.layers.12.self_attn.o_proj.bias', 'model.layers.12.self_attn.q_proj.bias', 'model.layers.12.self_attn.v_proj.bias', 'model.layers.13.mlp.down_proj.bias', 'model.layers.13.mlp.gate_proj.bias', 'model.layers.13.mlp.up_proj.bias', 'model.layers.13.self_attn.k_proj.bias', 'model.layers.13.self_attn.o_proj.bias', 'model.layers.13.self_attn.q_proj.bias', 'model.layers.13.self_attn.v_proj.bias', 'model.layers.14.mlp.down_proj.bias', 'model.layers.14.mlp.gate_proj.bias', 'model.layers.14.mlp.up_proj.bias', 'model.layers.14.self_attn.k_proj.bias', 'model.layers.14.self_attn.o_proj.bias', 'model.layers.14.self_attn.q_proj.bias', 'model.layers.14.self_attn.v_proj.bias', 'model.layers.15.mlp.down_proj.bias', 'model.layers.15.mlp.gate_proj.bias', 'model.layers.15.mlp.up_proj.bias', 'model.layers.15.self_attn.k_proj.bias', 'model.layers.15.self_attn.o_proj.bias', 'model.layers.15.self_attn.q_proj.bias', 'model.layers.15.self_attn.v_proj.bias', 'model.layers.16.mlp.down_proj.bias', 'model.layers.16.mlp.gate_proj.bias', 'model.layers.16.mlp.up_proj.bias', 'model.layers.16.self_attn.k_proj.bias', 'model.layers.16.self_attn.o_proj.bias', 'model.layers.16.self_attn.q_proj.bias', 'model.layers.16.self_attn.v_proj.bias', 'model.layers.17.mlp.down_proj.bias', 'model.layers.17.mlp.gate_proj.bias', 'model.layers.17.mlp.up_proj.bias', 'model.layers.17.self_attn.k_proj.bias', 'model.layers.17.self_attn.o_proj.bias', 'model.layers.17.self_attn.q_proj.bias', 'model.layers.17.self_attn.v_proj.bias', 'model.layers.18.mlp.down_proj.bias', 'model.layers.18.mlp.gate_proj.bias', 'model.layers.18.mlp.up_proj.bias', 'model.layers.18.self_attn.k_proj.bias', 'model.layers.18.self_attn.o_proj.bias', 'model.layers.18.self_attn.q_proj.bias', 'model.layers.18.self_attn.v_proj.bias', 'model.layers.19.mlp.down_proj.bias', 'model.layers.19.mlp.gate_proj.bias', 'model.layers.19.mlp.up_proj.bias', 'model.layers.19.self_attn.k_proj.bias', 'model.layers.19.self_attn.o_proj.bias', 'model.layers.19.self_attn.q_proj.bias', 'model.layers.19.self_attn.v_proj.bias', 'model.layers.2.mlp.down_proj.bias', 
'model.layers.2.mlp.gate_proj.bias', 'model.layers.2.mlp.up_proj.bias', 'model.layers.2.self_attn.k_proj.bias', 'model.layers.2.self_attn.o_proj.bias', 'model.layers.2.self_attn.q_proj.bias', 'model.layers.2.self_attn.v_proj.bias', 'model.layers.20.mlp.down_proj.bias', 'model.layers.20.mlp.gate_proj.bias', 'model.layers.20.mlp.up_proj.bias', 'model.layers.20.self_attn.k_proj.bias', 'model.layers.20.self_attn.o_proj.bias', 'model.layers.20.self_attn.q_proj.bias', 'model.layers.20.self_attn.v_proj.bias', 'model.layers.21.mlp.down_proj.bias', 'model.layers.21.mlp.gate_proj.bias', 'model.layers.21.mlp.up_proj.bias', 'model.layers.21.self_attn.k_proj.bias', 'model.layers.21.self_attn.o_proj.bias', 'model.layers.21.self_attn.q_proj.bias', 'model.layers.21.self_attn.v_proj.bias', 'model.layers.22.mlp.down_proj.bias', 'model.layers.22.mlp.gate_proj.bias', 'model.layers.22.mlp.up_proj.bias', 'model.layers.22.self_attn.k_proj.bias', 'model.layers.22.self_attn.o_proj.bias', 'model.layers.22.self_attn.q_proj.bias', 'model.layers.22.self_attn.v_proj.bias', 'model.layers.23.mlp.down_proj.bias', 'model.layers.23.mlp.gate_proj.bias', 'model.layers.23.mlp.up_proj.bias', 'model.layers.23.self_attn.k_proj.bias', 'model.layers.23.self_attn.o_proj.bias', 'model.layers.23.self_attn.q_proj.bias', 'model.layers.23.self_attn.v_proj.bias', 'model.layers.24.mlp.down_proj.bias', 'model.layers.24.mlp.gate_proj.bias', 'model.layers.24.mlp.up_proj.bias', 'model.layers.24.self_attn.k_proj.bias', 'model.layers.24.self_attn.o_proj.bias', 'model.layers.24.self_attn.q_proj.bias', 'model.layers.24.self_attn.v_proj.bias', 'model.layers.25.mlp.down_proj.bias', 'model.layers.25.mlp.gate_proj.bias', 'model.layers.25.mlp.up_proj.bias', 'model.layers.25.self_attn.k_proj.bias', 'model.layers.25.self_attn.o_proj.bias', 'model.layers.25.self_attn.q_proj.bias', 'model.layers.25.self_attn.v_proj.bias', 'model.layers.26.mlp.down_proj.bias', 'model.layers.26.mlp.gate_proj.bias', 'model.layers.26.mlp.up_proj.bias', 'model.layers.26.self_attn.k_proj.bias', 'model.layers.26.self_attn.o_proj.bias', 'model.layers.26.self_attn.q_proj.bias', 'model.layers.26.self_attn.v_proj.bias', 'model.layers.27.mlp.down_proj.bias', 'model.layers.27.mlp.gate_proj.bias', 'model.layers.27.mlp.up_proj.bias', 'model.layers.27.self_attn.k_proj.bias', 'model.layers.27.self_attn.o_proj.bias', 'model.layers.27.self_attn.q_proj.bias', 'model.layers.27.self_attn.v_proj.bias', 'model.layers.28.mlp.down_proj.bias', 'model.layers.28.mlp.gate_proj.bias', 'model.layers.28.mlp.up_proj.bias', 'model.layers.28.self_attn.k_proj.bias', 'model.layers.28.self_attn.o_proj.bias', 'model.layers.28.self_attn.q_proj.bias', 'model.layers.28.self_attn.v_proj.bias', 'model.layers.29.mlp.down_proj.bias', 'model.layers.29.mlp.gate_proj.bias', 'model.layers.29.mlp.up_proj.bias', 'model.layers.29.self_attn.k_proj.bias', 'model.layers.29.self_attn.o_proj.bias', 'model.layers.29.self_attn.q_proj.bias', 'model.layers.29.self_attn.v_proj.bias', 'model.layers.3.mlp.down_proj.bias', 'model.layers.3.mlp.gate_proj.bias', 'model.layers.3.mlp.up_proj.bias', 'model.layers.3.self_attn.k_proj.bias', 'model.layers.3.self_attn.o_proj.bias', 'model.layers.3.self_attn.q_proj.bias', 'model.layers.3.self_attn.v_proj.bias', 'model.layers.30.mlp.down_proj.bias', 'model.layers.30.mlp.gate_proj.bias', 'model.layers.30.mlp.up_proj.bias', 'model.layers.30.self_attn.k_proj.bias', 'model.layers.30.self_attn.o_proj.bias', 'model.layers.30.self_attn.q_proj.bias', 'model.layers.30.self_attn.v_proj.bias', 
'model.layers.31.mlp.down_proj.bias', 'model.layers.31.mlp.gate_proj.bias', 'model.layers.31.mlp.up_proj.bias', 'model.layers.31.self_attn.k_proj.bias', 'model.layers.31.self_attn.o_proj.bias', 'model.layers.31.self_attn.q_proj.bias', 'model.layers.31.self_attn.v_proj.bias', 'model.layers.4.mlp.down_proj.bias', 'model.layers.4.mlp.gate_proj.bias', 'model.layers.4.mlp.up_proj.bias', 'model.layers.4.self_attn.k_proj.bias', 'model.layers.4.self_attn.o_proj.bias', 'model.layers.4.self_attn.q_proj.bias', 'model.layers.4.self_attn.v_proj.bias', 'model.layers.5.mlp.down_proj.bias', 'model.layers.5.mlp.gate_proj.bias', 'model.layers.5.mlp.up_proj.bias', 'model.layers.5.self_attn.k_proj.bias', 'model.layers.5.self_attn.o_proj.bias', 'model.layers.5.self_attn.q_proj.bias', 'model.layers.5.self_attn.v_proj.bias', 'model.layers.6.mlp.down_proj.bias', 'model.layers.6.mlp.gate_proj.bias', 'model.layers.6.mlp.up_proj.bias', 'model.layers.6.self_attn.k_proj.bias', 'model.layers.6.self_attn.o_proj.bias', 'model.layers.6.self_attn.q_proj.bias', 'model.layers.6.self_attn.v_proj.bias', 'model.layers.7.mlp.down_proj.bias', 'model.layers.7.mlp.gate_proj.bias', 'model.layers.7.mlp.up_proj.bias', 'model.layers.7.self_attn.k_proj.bias', 'model.layers.7.self_attn.o_proj.bias', 'model.layers.7.self_attn.q_proj.bias', 'model.layers.7.self_attn.v_proj.bias', 'model.layers.8.mlp.down_proj.bias', 'model.layers.8.mlp.gate_proj.bias', 'model.layers.8.mlp.up_proj.bias', 'model.layers.8.self_attn.k_proj.bias', 'model.layers.8.self_attn.o_proj.bias', 'model.layers.8.self_attn.q_proj.bias', 'model.layers.8.self_attn.v_proj.bias', 'model.layers.9.mlp.down_proj.bias', 'model.layers.9.mlp.gate_proj.bias', 'model.layers.9.mlp.up_proj.bias', 'model.layers.9.self_attn.k_proj.bias', 'model.layers.9.self_attn.o_proj.bias', 'model.layers.9.self_attn.q_proj.bias', 'model.layers.9.self_attn.v_proj.bias'] +- This IS expected if you are initializing LlamaForCausalLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). +- This IS NOT expected if you are initializing LlamaForCausalLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_microphone.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_microphone.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fb3b76f44cb191f1d669aa5f1ecd7aa2ece8176 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_microphone.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. 
+ +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_object_detection.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_object_detection.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fb3b76f44cb191f1d669aa5f1ecd7aa2ece8176 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_object_detection.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_planning.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_planning.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fb3b76f44cb191f1d669aa5f1ecd7aa2ece8176 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_planning.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_plot_bot.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_plot_bot.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fb3b76f44cb191f1d669aa5f1ecd7aa2ece8176 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_plot_bot.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. 
+ +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_plot_webcam.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_plot_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fb3b76f44cb191f1d669aa5f1ecd7aa2ece8176 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_plot_webcam.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_robot.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_robot.txt new file mode 100644 index 0000000000000000000000000000000000000000..98a698e7c699bbfa479899dcd3da43ab125e4cfe --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_robot.txt @@ -0,0 +1,16 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init an operator + 2: failed to init python operator + 3: Traceback (most recent call last): + File "/home/peiji/dora/dora-robomaster/operators/robot.py", line 1, in + from robomaster import robot, blaster, led + + ModuleNotFoundError: No module named 'robomaster' + +Location: + binaries/runtime/src/operator/python.rs:28:9 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_webcam.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..d70828f237e3201f2798b960966977f6209b1e5f --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_webcam.txt @@ -0,0 +1,17 @@ +[ WARN:0@0.051] global cap_v4l.cpp:999 open VIDEOIO(V4L2:/dev/video2): can't open camera by index +[ERROR:0@0.051] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. + +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_whisper.txt b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_whisper.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fb3b76f44cb191f1d669aa5f1ecd7aa2ece8176 --- /dev/null +++ b/dora-robomaster/graphs/out/01902f2d-1028-7bff-a48f-f8bfb75009e9/log_whisper.txt @@ -0,0 +1,15 @@ +Traceback (most recent call last): + File "", line 1, in +RuntimeError: Dora Runtime raised an error. 
+ +Caused by: + 0: main task failed + 1: failed to init event stream + 2: subscribe failed: Some nodes exited before subscribing to dora: {NodeId("robot")} + + This is typically happens when an initialization error occurs + in the node or operator. To check the output of the failed + nodes, run `dora logs 01902f2d-1028-7bff-a48f-f8bfb75009e9 robot`. + +Location: + apis/rust/node/src/event_stream/mod.rs:90:17 diff --git a/dora-robomaster/graphs/reaction.yml b/dora-robomaster/graphs/reaction.yml new file mode 100644 index 0000000000000000000000000000000000000000..4521cb5141f7293e8476cbf2a72b2821d068bb9f --- /dev/null +++ b/dora-robomaster/graphs/reaction.yml @@ -0,0 +1,40 @@ +nodes: + - id: robot + operator: + python: ../operators/robot.py + inputs: + blaster: + source: planning/blaster + queue_size: 1 + led: + source: planning/led + queue_size: 1 + tick: + source: dora/timer/millis/50 + queue_size: 1 + outputs: + - image + - position + - id: plot + operator: + python: ../operators/plot.py + inputs: + image: robot/image + bbox: object_detection/bbox + - id: object_detection + operator: + python: ../operators/object_detection.py + inputs: + image: + source: robot/image + queue_size: 1 + outputs: + - bbox + - id: planning + operator: + python: ../operators/reaction_op.py + inputs: + bbox: object_detection/bbox + outputs: + - led + - blaster diff --git a/dora-robomaster/operators/__pycache__/cv2_encoder_op.cpython-310.pyc b/dora-robomaster/operators/__pycache__/cv2_encoder_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a17fc3f7f4240e0d59ec116db95eb495c9df712 Binary files /dev/null and b/dora-robomaster/operators/__pycache__/cv2_encoder_op.cpython-310.pyc differ diff --git a/dora-robomaster/operators/__pycache__/file_saver_op.cpython-310.pyc b/dora-robomaster/operators/__pycache__/file_saver_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aeda93ce6f47b80b00279908b0d683f83d80121c Binary files /dev/null and b/dora-robomaster/operators/__pycache__/file_saver_op.cpython-310.pyc differ diff --git a/dora-robomaster/operators/__pycache__/llm_op.cpython-310.pyc b/dora-robomaster/operators/__pycache__/llm_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6dfa2e47bb5e627616aaf40e693ba90b189074f Binary files /dev/null and b/dora-robomaster/operators/__pycache__/llm_op.cpython-310.pyc differ diff --git a/dora-robomaster/operators/__pycache__/microphone_op.cpython-310.pyc b/dora-robomaster/operators/__pycache__/microphone_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..509ef9a793881e0265ae7c3bb3d303824868304b Binary files /dev/null and b/dora-robomaster/operators/__pycache__/microphone_op.cpython-310.pyc differ diff --git a/dora-robomaster/operators/__pycache__/object_detection.cpython-310.pyc b/dora-robomaster/operators/__pycache__/object_detection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18a6d823fe211abcf4bb7a6b0d7444bd6d02fd0d Binary files /dev/null and b/dora-robomaster/operators/__pycache__/object_detection.cpython-310.pyc differ diff --git a/dora-robomaster/operators/__pycache__/planning_op.cpython-310.pyc b/dora-robomaster/operators/__pycache__/planning_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..337990678ec7384c78cca1648f56b5eacbf036d6 Binary files /dev/null and b/dora-robomaster/operators/__pycache__/planning_op.cpython-310.pyc differ diff --git 
a/dora-robomaster/operators/__pycache__/plot.cpython-310.pyc b/dora-robomaster/operators/__pycache__/plot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f608394b6cba594bfd524163fbd069d9c2cb578a Binary files /dev/null and b/dora-robomaster/operators/__pycache__/plot.cpython-310.pyc differ diff --git a/dora-robomaster/operators/__pycache__/robot.cpython-310.pyc b/dora-robomaster/operators/__pycache__/robot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59e3a0faadfcd4bf1c0203533805783686fc62c0 Binary files /dev/null and b/dora-robomaster/operators/__pycache__/robot.cpython-310.pyc differ diff --git a/dora-robomaster/operators/__pycache__/utils.cpython-310.pyc b/dora-robomaster/operators/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7653d377279bf3b2719e28753be60571e9ab1bf1 Binary files /dev/null and b/dora-robomaster/operators/__pycache__/utils.cpython-310.pyc differ diff --git a/dora-robomaster/operators/__pycache__/webcam.cpython-310.pyc b/dora-robomaster/operators/__pycache__/webcam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec0be7693210e58b973cf96bc061628e76e737dd Binary files /dev/null and b/dora-robomaster/operators/__pycache__/webcam.cpython-310.pyc differ diff --git a/dora-robomaster/operators/__pycache__/whisper_op.cpython-310.pyc b/dora-robomaster/operators/__pycache__/whisper_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df17948c08383fdc6e7c5f280b78374c9a836369 Binary files /dev/null and b/dora-robomaster/operators/__pycache__/whisper_op.cpython-310.pyc differ diff --git a/dora-robomaster/operators/chatgpt_op.py b/dora-robomaster/operators/chatgpt_op.py new file mode 100644 index 0000000000000000000000000000000000000000..2bacd3398d093fb66eab71e8490197942d42b357 --- /dev/null +++ b/dora-robomaster/operators/chatgpt_op.py @@ -0,0 +1,130 @@ +import os +from openai import OpenAI + + +def ask_gpt(prompt, raw): + client = OpenAI() + + prompt = ( + "this is a python code :\n" + + "```python\n" + + raw + + "```\n" + + prompt + + "Format your response by: Showing the whole modified code. No explanation is required. Only code." 
+    )
+
+    response = client.chat.completions.create(
+        model="gpt-4-turbo-preview",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": prompt},
+        ],
+    )
+
+    answer = response.choices[0].message.content
+    return prompt, answer
+
+
+def extract_command(gptCommand):
+    blocks = []
+    temp = ""
+    writing = False
+
+    for line in gptCommand.splitlines():
+        if line == "```":
+            writing = False
+            blocks.append(temp)
+            temp = ""
+
+        if writing:
+            temp += line
+            temp += "\n"
+
+        if line == "```python":
+            writing = True
+
+    return blocks
+
+
+def save_as(content, path):
+    # use at the end of replace_2 as save_as(end_result, "file_path")
+    with open(path, "w") as file:
+        file.write(content)
+
+
+import pyarrow as pa
+
+from dora import DoraStatus
+import time
+
+
+class Operator:
+    """
+    Generating modified code with ChatGPT
+    """
+
+    def on_event(
+        self,
+        dora_event,
+        send_output,
+    ) -> DoraStatus:
+        if dora_event["type"] == "INPUT":
+            input = dora_event["value"][0].as_py()
+            with open(input["path"], "r", encoding="utf8") as f:
+                raw = f.read()
+            ask_time = time.time()
+            print("--- Asking chatGPT ", flush=True)
+            prompt, response = ask_gpt(input["query"], raw)
+            blocks = extract_command(response)
+            print(response, flush=True)
+            print("Response time:", time.time() - ask_time, flush=True)
+
+            send_output(
+                "output_file",
+                pa.array(
+                    [
+                        {
+                            "raw": blocks[0],
+                            "path": input["path"],
+                            "response": response,
+                            "prompt": prompt,
+                        }
+                    ]
+                ),
+                dora_event["metadata"],
+            )
+
+        return DoraStatus.CONTINUE
+
+
+if __name__ == "__main__":
+    op = Operator()
+
+    # Path to the current file
+    current_file_path = __file__
+
+    # Directory of the current file
+    current_directory = os.path.dirname(current_file_path)
+
+    path = current_directory + "/planning_op.py"
+    with open(path, "r", encoding="utf8") as f:
+        raw = f.read()
+
+    op.on_event(
+        {
+            "type": "INPUT",
+            "id": "tick",
+            "value": pa.array(
+                [
+                    {
+                        "raw": raw,
+                        "path": path,
+                        "query": "Can you change the RGB to change according to the object distances",
+                    }
+                ]
+            ),
+            "metadata": [],
+        },
+        print,
+    )
diff --git a/dora-robomaster/operators/cv2_encoder_op.py b/dora-robomaster/operators/cv2_encoder_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..f15263479911e571c8289aa1d273f9964526e20b
--- /dev/null
+++ b/dora-robomaster/operators/cv2_encoder_op.py
@@ -0,0 +1,32 @@
+import pyarrow as pa
+
+from dora import DoraStatus
+import cv2
+import os
+
+
+CAMERA_WIDTH = 960
+CAMERA_HEIGHT = 540
+
+ENCODING = os.getenv("ENCODING", ".jpg")
+
+
+class Operator:
+    """
+    Encoding camera frames with OpenCV
+    """
+
+    def on_event(
+        self,
+        dora_event,
+        send_output,
+    ) -> DoraStatus:
+        if dora_event["type"] == "INPUT":
+            frame = (
+                dora_event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
+            )
+            retval, buffer = cv2.imencode(ENCODING, frame)
+            if retval:
+                send_output("encoded_image", pa.array([buffer]), dora_event["metadata"])
+
+        return DoraStatus.CONTINUE
diff --git a/dora-robomaster/operators/deepseek_op.py b/dora-robomaster/operators/deepseek_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..45e46423ad124329e9fd008269c0721d0beb5aab
--- /dev/null
+++ b/dora-robomaster/operators/deepseek_op.py
@@ -0,0 +1,301 @@
+from dora import DoraStatus
+import pylcs
+import textwrap
+import os
+import pyarrow as pa
+import numpy as np
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+import json
+
+import re
+
+
+def extract_python_code_blocks(text):
+    
""" + Extracts Python code blocks from the given text that are enclosed in triple backticks with a python language identifier. + + Parameters: + - text: A string that may contain one or more Python code blocks. + + Returns: + - A list of strings, where each string is a block of Python code extracted from the text. + """ + pattern = r"```python\n(.*?)\n```" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + pattern = r"```python\n(.*?)(?:\n```|$)" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + return [text] + + return matches + + +def extract_json_code_blocks(text): + """ + Extracts json code blocks from the given text that are enclosed in triple backticks with a json language identifier. + + Parameters: + - text: A string that may contain one or more json code blocks. + + Returns: + - A list of strings, where each string is a block of json code extracted from the text. + """ + pattern = r"```json\n(.*?)\n```" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + pattern = r"```json\n(.*?)(?:\n```|$)" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + return [text] + + return matches + + +def remove_last_line(python_code): + """ + Removes the last line from a given string of Python code. + + Parameters: + - python_code: A string representing Python source code. + + Returns: + - A string with the last line removed. + """ + lines = python_code.split("\n") # Split the string into lines + if lines: # Check if there are any lines to remove + lines.pop() # Remove the last line + return "\n".join(lines) # Join the remaining lines back into a string + + +def calculate_similarity(source, target): + """ + Calculate a similarity score between the source and target strings. + This uses the edit distance relative to the length of the strings. + """ + edit_distance = pylcs.edit_distance(source, target) + max_length = max(len(source), len(target)) + # Normalize the score by the maximum possible edit distance (the length of the longer string) + similarity = 1 - (edit_distance / max_length) + return similarity + + +def find_best_match_location(source_code, target_block): + """ + Find the best match for the target_block within the source_code by searching line by line, + considering blocks of varying lengths. + """ + source_lines = source_code.split("\n") + target_lines = target_block.split("\n") + + best_similarity = 0 + best_start_index = -1 + best_end_index = -1 + + # Iterate over the source lines to find the best matching range for all lines in target_block + for start_index in range(len(source_lines) - len(target_lines) + 1): + for end_index in range(start_index + len(target_lines), len(source_lines) + 1): + current_window = "\n".join(source_lines[start_index:end_index]) + current_similarity = calculate_similarity(current_window, target_block) + if current_similarity > best_similarity: + best_similarity = current_similarity + best_start_index = start_index + best_end_index = end_index + + # Convert line indices back to character indices for replacement + char_start_index = len("\n".join(source_lines[:best_start_index])) + ( + 1 if best_start_index > 0 else 0 + ) + char_end_index = len("\n".join(source_lines[:best_end_index])) + + return char_start_index, char_end_index + + +def replace_code_in_source(source_code, replacement_block: str): + """ + Replace the best matching block in the source_code with the replacement_block, considering variable block lengths. 
+ """ + replacement_block = extract_python_code_blocks(replacement_block)[0] + print("replacement_block: ", replacement_block) + replacement_block = remove_last_line(replacement_block) + start_index, end_index = find_best_match_location(source_code, replacement_block) + + if start_index != -1 and end_index != -1: + # Replace the best matching part with the replacement block + new_source = ( + source_code[:start_index] + replacement_block + source_code[end_index:] + ) + return new_source + else: + return source_code + + +def save_as(content, path): + # use at the end of replace_2 as save_as(end_result, "file_path") + with open(path, "w") as file: + file.write(content) + + +class Operator: + def __init__(self): + # Load tokenizer + model_name_or_path = "/home/peiji/deepseek-coder-6.7B-instruct-GPTQ/" + # To use a different branch, change revision + # For example: revision="gptq-4bit-32g-actorder_True" + self.model = AutoModelForCausalLM.from_pretrained( + model_name_or_path, + device_map="auto", + trust_remote_code=False, + revision="main", + ) + + self.tokenizer = AutoTokenizer.from_pretrained( + model_name_or_path, use_fast=True + ) + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + input = dora_event["value"][0].as_py() + + if False: + with open(input["path"], "r", encoding="utf8") as f: + raw = f.read() + prompt = f"{raw} \n {input['query']}. " + print("prompt: ", prompt, flush=True) + output = self.ask_mistral( + "You're a python code expert. Respond with the small modified code only. No explaination", + prompt, + ) + print("output: {}".format(output)) + + source_code = replace_code_in_source(raw, output) + send_output( + "output_file", + pa.array( + [ + { + "raw": source_code, + # "path": input["path"], + # "response": output, + # "prompt": prompt, + } + ] + ), + dora_event["metadata"], + ) + else: + output = self.ask_mistral( + """You're a json expert. Format your response as a json with a topic and a data field in a ```json block. No explaination needed. No code needed. 
+The schema for those json are:
+- led: Int[3] (min: 0, max: 255) # RGB values
+- blaster: Int (min: 0, max: 128)
+- control: Int[3] (min: -1, max: 1)
+- rotation: Int[2] (min: -55, max: 55)
+- message: String
+
+The response should look like this:
+```json
+{
+  "topic": "led",
+  "data": [255, 0, 0]
+}
+```
+""",
+                    input["query"],
+                )
+                output = extract_json_code_blocks(output)[0]
+                print("output: {}".format(output), flush=True)
+                try:
+                    output = json.loads(output)
+                    if not isinstance(output["data"], list):
+                        output["data"] = [output["data"]]
+
+                    if output["topic"] in [
+                        "led",
+                        "blaster",
+                        "control",
+                        "rotation",
+                        "text",
+                    ]:
+                        print("output", output)
+                        send_output(
+                            output["topic"],
+                            pa.array(output["data"]),
+                            dora_event["metadata"],
+                        )
+                except Exception:
+                    print("Could not parse json")
+                # if data is not iterable, put data in a list
+
+        return DoraStatus.CONTINUE
+
+    def ask_mistral(self, system_message, prompt):
+        prompt_template = f"""
+### Instruction
+{system_message}
+
+{prompt}
+
+### Response:
+"""
+
+        # Generate output
+
+        input = self.tokenizer(prompt_template, return_tensors="pt")
+        input_ids = input.input_ids.cuda()
+
+        # add attention mask here
+        attention_mask = input["attention_mask"]
+
+        output = self.model.generate(
+            inputs=input_ids,
+            temperature=0.7,
+            do_sample=True,
+            top_p=0.95,
+            top_k=40,
+            max_new_tokens=512,
+            attention_mask=attention_mask,
+            eos_token_id=self.tokenizer.eos_token_id,
+        )
+        # Get the tokens from the output, decode them, print them
+
+        # Get text between im_start and im_end
+        return self.tokenizer.decode(output[0], skip_special_tokens=True)[
+            len(prompt_template) :
+        ]
+
+
+if __name__ == "__main__":
+    op = Operator()
+
+    # Path to the current file
+    current_file_path = __file__
+
+    # Directory of the current file
+    current_directory = os.path.dirname(current_file_path)
+
+    path = current_directory + "/plot.py"
+    with open(path, "r", encoding="utf8") as f:
+        raw = f.read()
+
+    op.on_event(
+        {
+            "type": "INPUT",
+            "id": "tick",
+            "value": pa.array(
+                [
+                    {
+                        "raw": raw,
+                        "path": path,
+                        "query": "Send message my name is Carlito",
+                    }
+                ]
+            ),
+            "metadata": [],
+        },
+        print,
+    )
diff --git a/dora-robomaster/operators/file_saver_op.py b/dora-robomaster/operators/file_saver_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..c13b9e55f656d4a4bdfec658c4bdba5c84d7a223
--- /dev/null
+++ b/dora-robomaster/operators/file_saver_op.py
@@ -0,0 +1,44 @@
+import pyarrow as pa
+
+from dora import DoraStatus
+
+
+class Operator:
+    """
+    Saving the incoming file content to disk
+    """
+
+    def __init__(self):
+        self.last_file = ""
+        self.last_path = ""
+        self.last_metadata = None
+
+    def on_event(
+        self,
+        dora_event,
+        send_output,
+    ) -> DoraStatus:
+        if dora_event["type"] == "INPUT":
+            input = dora_event["value"][0].as_py()
+
+            with open(input["path"], "r") as file:
+                self.last_file = file.read()
+                self.last_path = input["path"]
+                self.last_metadata = dora_event["metadata"]
+            with open(input["path"], "w") as file:
+                file.write(input["raw"])
+
+            send_output(
+                "saved_file",
+                pa.array(
+                    [
+                        {
+                            "raw": input["raw"],
+                            "path": input["path"],
+                            "origin": dora_event["id"],
+                        }
+                    ]
+                ),
+                dora_event["metadata"],
+            )
+        return DoraStatus.CONTINUE
diff --git a/dora-robomaster/operators/keyboard_op.py b/dora-robomaster/operators/keyboard_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d179ac6309bdbf1ff159ddbd9947167d2f133bb
--- /dev/null
+++ b/dora-robomaster/operators/keyboard_op.py
@@ -0,0 +1,65 @@
+from pynput import keyboard
+from pynput.keyboard import Key, Events
+import pyarrow as pa
+from dora import Node
+
+
+node = Node()
+buffer_text = ""
+ctrl = False
+submitted_text = []
+cursor = 0
+
+NODE_TOPIC = ["record", "send", "ask", "change"]
+
+with keyboard.Events() as events:
+    while True:
+        dora_event = node.next(0.01)
+        if (
+            dora_event is not None
+            and dora_event["type"] == "INPUT"
+            and dora_event["id"] == "recording"
+        ):
+            buffer_text += dora_event["value"][0].as_py()
+            node.send_output("buffer", pa.array([buffer_text]))
+            continue
+
+        event = events.get(1.0)
+        if event is not None and isinstance(event, Events.Press):
+            if hasattr(event.key, "char"):
+                cursor = 0
+                buffer_text += event.key.char
+                node.send_output("buffer", pa.array([buffer_text]))
+            else:
+                if event.key == Key.backspace:
+                    buffer_text = buffer_text[:-1]
+                    node.send_output("buffer", pa.array([buffer_text]))
+                elif event.key == Key.esc:
+                    buffer_text = ""
+                    node.send_output("buffer", pa.array([buffer_text]))
+                elif event.key == Key.enter:
+                    node.send_output("submitted", pa.array([buffer_text]))
+                    first_word = buffer_text.split(" ")[0]
+                    if first_word in NODE_TOPIC:
+                        node.send_output(first_word, pa.array([buffer_text]))
+                    submitted_text.append(buffer_text)
+                    buffer_text = ""
+                    node.send_output("buffer", pa.array([buffer_text]))
+                elif event.key == Key.ctrl:
+                    ctrl = True
+                elif event.key == Key.space:
+                    buffer_text += " "
+                    node.send_output("buffer", pa.array([buffer_text]))
+                elif event.key == Key.up:
+                    if len(submitted_text) > 0:
+                        cursor = max(cursor - 1, -len(submitted_text))
+                        buffer_text = submitted_text[cursor]
+                        node.send_output("buffer", pa.array([buffer_text]))
+                elif event.key == Key.down:
+                    if len(submitted_text) > 0:
+                        cursor = min(cursor + 1, 0)
+                        buffer_text = submitted_text[cursor]
+                        node.send_output("buffer", pa.array([buffer_text]))
+        elif event is not None and isinstance(event, Events.Release):
+            if event.key == Key.ctrl:
+                ctrl = False
diff --git a/dora-robomaster/operators/llm_op.py b/dora-robomaster/operators/llm_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..f277bc418ca71deee1087a5ed0f64290817de98f
--- /dev/null
+++ b/dora-robomaster/operators/llm_op.py
@@ -0,0 +1,361 @@
+from dora import DoraStatus
+import pylcs
+import os
+import pyarrow as pa
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import json
+
+import re
+import time
+
+CHATGPT = False
+MODEL_NAME_OR_PATH = "/home/peiji/deepseek-coder-6.7B-instruct-GPTQ/"
+
+CODE_MODIFIER_TEMPLATE = """
+### Instruction
+Respond with one block of modified code only in ```python block. No explanation.
+
+```python
+{code}
+```
+
+{user_message}
+
+### Response:
+"""
+
+
+MESSAGE_SENDER_TEMPLATE = """
+### Instruction
+You're a json expert. Format your response as a json with a topic and a data field in a ```json block. No explanation needed. No code needed.
+The schema for those json are:
+- led: Int[3] (min: 0, max: 255)
+- blaster: Int (min: 0, max: 128)
+- control: Int[3] (min: -1, max: 1)
+- rotation: Int[2] (min: -55, max: 55)
+- line: Int[4]
+
+The response should look like this:
+```json
+
+  [
+    {{ "topic": "line", "data": [10, 10, 90, 10] }},
+]
+```
+
+{user_message}
+
+### Response:
+"""
+
+ASSISTANT_TEMPLATE = """
+### Instruction
+You're a helpful assistant named dora.
+Reply with a short message. No code needed.
+ +User {user_message} + +### Response: +""" + + +model = AutoModelForCausalLM.from_pretrained( + MODEL_NAME_OR_PATH, + device_map="auto", + trust_remote_code=True, + revision="main", +) + + +tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True) + + +def extract_python_code_blocks(text): + """ + Extracts Python code blocks from the given text that are enclosed in triple backticks with a python language identifier. + + Parameters: + - text: A string that may contain one or more Python code blocks. + + Returns: + - A list of strings, where each string is a block of Python code extracted from the text. + """ + pattern = r"```python\n(.*?)\n```" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + pattern = r"```python\n(.*?)(?:\n```|$)" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + return [text] + else: + matches = [remove_last_line(matches[0])] + + return matches + + +def extract_json_code_blocks(text): + """ + Extracts json code blocks from the given text that are enclosed in triple backticks with a json language identifier. + + Parameters: + - text: A string that may contain one or more json code blocks. + + Returns: + - A list of strings, where each string is a block of json code extracted from the text. + """ + pattern = r"```json\n(.*?)\n```" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + pattern = r"```json\n(.*?)(?:\n```|$)" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + return [text] + + return matches + + +def remove_last_line(python_code): + """ + Removes the last line from a given string of Python code. + + Parameters: + - python_code: A string representing Python source code. + + Returns: + - A string with the last line removed. + """ + lines = python_code.split("\n") # Split the string into lines + if lines: # Check if there are any lines to remove + lines.pop() # Remove the last line + return "\n".join(lines) # Join the remaining lines back into a string + + +def calculate_similarity(source, target): + """ + Calculate a similarity score between the source and target strings. + This uses the edit distance relative to the length of the strings. + """ + edit_distance = pylcs.edit_distance(source, target) + max_length = max(len(source), len(target)) + # Normalize the score by the maximum possible edit distance (the length of the longer string) + similarity = 1 - (edit_distance / max_length) + return similarity + + +def find_best_match_location(source_code, target_block): + """ + Find the best match for the target_block within the source_code by searching line by line, + considering blocks of varying lengths. 
+ """ + source_lines = source_code.split("\n") + target_lines = target_block.split("\n") + + best_similarity = 0 + best_start_index = 0 + best_end_index = -1 + + # Iterate over the source lines to find the best matching range for all lines in target_block + for start_index in range(len(source_lines) - len(target_lines) + 1): + for end_index in range(start_index + len(target_lines), len(source_lines) + 1): + current_window = "\n".join(source_lines[start_index:end_index]) + current_similarity = calculate_similarity(current_window, target_block) + if current_similarity > best_similarity: + best_similarity = current_similarity + best_start_index = start_index + best_end_index = end_index + + # Convert line indices back to character indices for replacement + char_start_index = len("\n".join(source_lines[:best_start_index])) + ( + 1 if best_start_index > 0 else 0 + ) + char_end_index = len("\n".join(source_lines[:best_end_index])) + + return char_start_index, char_end_index + + +def replace_code_in_source(source_code, replacement_block: str): + """ + Replace the best matching block in the source_code with the replacement_block, considering variable block lengths. + """ + replacement_block = extract_python_code_blocks(replacement_block)[0] + start_index, end_index = find_best_match_location(source_code, replacement_block) + if start_index != -1 and end_index != -1: + # Replace the best matching part with the replacement block + new_source = ( + source_code[:start_index] + replacement_block + source_code[end_index:] + ) + return new_source + else: + return source_code + + +class Operator: + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT" and dora_event["id"] == "code_modifier": + input = dora_event["value"][0].as_py() + + with open(input["path"], "r", encoding="utf8") as f: + code = f.read() + + user_message = input["user_message"] + start_llm = time.time() + if CHATGPT: + output = self.ask_chatgpt( + CODE_MODIFIER_TEMPLATE.format(code=code, user_message=user_message) + ) + else: + output = self.ask_llm( + CODE_MODIFIER_TEMPLATE.format(code=code, user_message=user_message) + ) + + source_code = replace_code_in_source(code, output) + print("response time:", time.time() - start_llm, flush=True) + send_output( + "modified_file", + pa.array( + [ + { + "raw": source_code, + "path": input["path"], + "response": output, + "prompt": input["user_message"], + } + ] + ), + dora_event["metadata"], + ) + print("response: ", output, flush=True) + send_output( + "assistant_message", + pa.array([output]), + dora_event["metadata"], + ) + elif dora_event["type"] == "INPUT" and dora_event["id"] == "message_sender": + user_message = dora_event["value"][0].as_py() + output = self.ask_llm( + MESSAGE_SENDER_TEMPLATE.format(user_message=user_message) + ) + outputs = extract_json_code_blocks(output)[0] + print("response: ", output, flush=True) + try: + outputs = json.loads(outputs) + if not isinstance(outputs, list): + outputs = [outputs] + for output in outputs: + if not isinstance(output["data"], list): + output["data"] = [output["data"]] + + if output["topic"] in ["led", "blaster"]: + send_output( + output["topic"], + pa.array(output["data"]), + dora_event["metadata"], + ) + + send_output( + "assistant_message", + pa.array([f"sent: {output}"]), + dora_event["metadata"], + ) + else: + send_output( + "assistant_message", + pa.array( + [f"Could not send as topic was not available: {output}"] + ), + dora_event["metadata"], + ) + except: + send_output( + 
"assistant_message", + pa.array([f"Could not parse json: {outputs}"]), + dora_event["metadata"], + ) + # if data is not iterable, put data in a list + elif dora_event["type"] == "INPUT" and dora_event["id"] == "assistant": + user_message = dora_event["value"][0].as_py() + output = self.ask_llm(ASSISTANT_TEMPLATE.format(user_message=user_message)) + send_output( + "assistant_message", + pa.array([output]), + dora_event["metadata"], + ) + return DoraStatus.CONTINUE + + def ask_llm(self, prompt): + + # Generate output + # prompt = PROMPT_TEMPLATE.format(system_message=system_message, prompt=prompt)) + input = tokenizer(prompt, return_tensors="pt") + input_ids = input.input_ids.cuda() + + # add attention mask here + attention_mask = input["attention_mask"] + + output = model.generate( + inputs=input_ids, + temperature=0.7, + do_sample=True, + top_p=0.95, + top_k=40, + max_new_tokens=512, + attention_mask=attention_mask, + eos_token_id=tokenizer.eos_token_id, + ) + # Get the tokens from the output, decode them, print them + + # Get text between im_start and im_end + return tokenizer.decode(output[0], skip_special_tokens=True)[len(prompt) :] + + def ask_chatgpt(self, prompt): + from openai import OpenAI + + client = OpenAI() + print("---asking chatgpt: ", prompt, flush=True) + response = client.chat.completions.create( + model="gpt-4-turbo-preview", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": prompt}, + ], + ) + answer = response.choices[0].message.content + + print("Done", flush=True) + return answer + + +if __name__ == "__main__": + op = Operator() + + # Path to the current file + current_file_path = __file__ + + # Directory of the current file + current_directory = os.path.dirname(current_file_path) + + path = current_directory + "/planning_op.py" + with open(path, "r", encoding="utf8") as f: + raw = f.read() + + op.on_event( + { + "type": "INPUT", + "id": "code_modifier", + "value": pa.array( + [ + { + "path": path, + "user_message": "change planning to make gimbal follow bounding box ", + }, + ] + ), + "metadata": [], + }, + print, + ) diff --git a/dora-robomaster/operators/microphone_op.py b/dora-robomaster/operators/microphone_op.py new file mode 100644 index 0000000000000000000000000000000000000000..6c3b37ad245fc6290bd259deed171d31a6f79e1f --- /dev/null +++ b/dora-robomaster/operators/microphone_op.py @@ -0,0 +1,36 @@ +import numpy as np +import pyarrow as pa +import sounddevice as sd + +from dora import DoraStatus + +# Set the parameters for recording +SAMPLE_RATE = 16000 +MAX_DURATION = 5 + + +class Operator: + """ + Microphone operator that records the audio + """ + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + audio_data = sd.rec( + int(SAMPLE_RATE * MAX_DURATION), + samplerate=SAMPLE_RATE, + channels=1, + dtype=np.int16, + blocking=True, + ) + + audio_data = audio_data.ravel().astype(np.float32) / 32768.0 + if len(audio_data) > 0: + send_output("audio", pa.array(audio_data), dora_event["metadata"]) + elif dora_event["type"] == "INPUT": + print("Microphone is not recording", dora_event["value"][0].as_py()) + return DoraStatus.CONTINUE diff --git a/dora-robomaster/operators/mistral_op.py b/dora-robomaster/operators/mistral_op.py new file mode 100644 index 0000000000000000000000000000000000000000..de5f020e8599b39bdbbf0de043f94f41615df3d7 --- /dev/null +++ b/dora-robomaster/operators/mistral_op.py @@ -0,0 +1,189 @@ +from dora import DoraStatus +import pylcs 
+import textwrap +import pandas as pd +import os +import pyarrow as pa +import numpy as np +from ctransformers import AutoModelForCausalLM +import json + +MIN_NUMBER_LINES = 4 +MAX_NUMBER_LINES = 21 + + +def search_most_simlar_line(text, searched_line): + lines = text.split("\n") + values = [] + + for line in lines[MIN_NUMBER_LINES:MAX_NUMBER_LINES]: + values.append(pylcs.edit_distance(line, searched_line)) + output = lines[np.array(values).argmin() + MIN_NUMBER_LINES] + return output + + +def strip_indentation(code_block): + # Use textwrap.dedent to strip common leading whitespace + dedented_code = textwrap.dedent(code_block) + + return dedented_code + + +def replace_code_with_indentation(original_code, replacement_code): + # Split the original code into lines + lines = original_code.splitlines() + if len(lines) != 0: + # Preserve the indentation of the first line + indentation = lines[0][: len(lines[0]) - len(lines[0].lstrip())] + + # Create a new list of lines with the replacement code and preserved indentation + new_code_lines = indentation + replacement_code + else: + new_code_lines = replacement_code + return new_code_lines + + +def replace_source_code(source_code, gen_replacement): + initial = search_most_simlar_line(source_code, gen_replacement) + print("Initial source code: %s" % initial) + replacement = strip_indentation( + gen_replacement.replace("```python\n", "") + .replace("\n```", "") + .replace("\n", "") + ) + intermediate_result = replace_code_with_indentation(initial, replacement) + print("Intermediate result: %s" % intermediate_result) + end_result = source_code.replace(initial, intermediate_result) + return end_result + + +def save_as(content, path): + # use at the end of replace_2 as save_as(end_result, "file_path") + with open(path, "w") as file: + file.write(content) + + +class Operator: + def __init__(self): + # Load tokenizer + self.llm = AutoModelForCausalLM.from_pretrained( + "TheBloke/OpenHermes-2.5-Mistral-7B-GGUF", + model_file="openhermes-2.5-mistral-7b.Q4_K_M.gguf", + model_type="mistral", + gpu_layers=50, + ) + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + input = dora_event["value"][0].as_py() + + if False: + with open(input["path"], "r", encoding="utf8") as f: + raw = f.read() + prompt = f"{raw[:400]} \n\n {input['query']}. " + output = self.ask_mistral( + "You're a python code expert. Respond with only one line of code that modify a constant variable. Keep the uppercase.", + prompt, + ) + print("output: {}".format(output)) + + source_code = replace_source_code(raw, output) + send_output( + "output_file", + pa.array( + [ + { + "raw": source_code, + "path": input["path"], + "response": output, + "prompt": prompt, + } + ] + ), + dora_event["metadata"], + ) + else: + print("input: ", input, flush=True) + output = self.ask_mistral( + """You're a json expert. Format your response as a json with a topic field and a data field. 
+The schema for those json are: +- led: Int[3] (min: 0, max: 255) +- blaster: Int (min: 0, max: 128) +- control: Int[3] (min: -1, max: 1) +- rotation: Int[2] (min: -55, max: 55) + + +""", + input["query"], + ) + print("output: {}".format(output), flush=True) + try: + output = json.loads(output) + if not isinstance(output["data"], list): + output["data"] = [output["data"]] + + if output["topic"] in ["led", "blaster", "control", "rotation"]: + print("output", output) + send_output( + output["topic"], + pa.array(output["data"]), + dora_event["metadata"], + ) + except: + print("Could not parse json") + # if data is not iterable, put data in a list + + return DoraStatus.CONTINUE + + def ask_mistral(self, system_message, prompt): + prompt_template = f"""<|im_start|>system + {system_message}<|im_end|> + <|im_start|>user + {prompt}<|im_end|> + <|im_start|>assistant + """ + + # Generate output + outputs = self.llm( + prompt_template, + ) + # Get the tokens from the output, decode them, print them + + # Get text between im_start and im_end + return outputs.split("<|im_end|>")[0] + + +if __name__ == "__main__": + op = Operator() + + # Path to the current file + current_file_path = __file__ + + # Directory of the current file + current_directory = os.path.dirname(current_file_path) + + path = current_directory + "/planning_op.py" + with open(path, "r", encoding="utf8") as f: + raw = f.read() + + op.on_event( + { + "type": "INPUT", + "id": "tick", + "value": pa.array( + [ + { + "raw": raw, + "path": path, + "query": "le control a 1 0 0", + } + ] + ), + "metadata": [], + }, + print, + ) diff --git a/dora-robomaster/operators/object_detection.py b/dora-robomaster/operators/object_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..27c085c1019b531fe7d6be1dac71c991f3d4f408 --- /dev/null +++ b/dora-robomaster/operators/object_detection.py @@ -0,0 +1,39 @@ +import numpy as np +import pyarrow as pa + +from dora import DoraStatus +from ultralytics import YOLO + +pa.array([]) + +CAMERA_WIDTH = 960 +CAMERA_HEIGHT = 540 + +model = YOLO("yolov8n.pt") + + +class Operator: + """ + Infering object from images + """ + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + frame = ( + dora_event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)) + ) + frame = frame[:, :, ::-1] # OpenCV image (BGR to RGB) + results = model(frame, verbose=False) # includes NMS + boxes = np.array(results[0].boxes.xyxy.cpu()) + conf = np.array(results[0].boxes.conf.cpu()) + label = np.array(results[0].boxes.cls.cpu()) + # concatenate them together + arrays = np.concatenate((boxes, conf[:, None], label[:, None]), axis=1) + + send_output("bbox", pa.array(arrays.ravel()), dora_event["metadata"]) + + return DoraStatus.CONTINUE diff --git a/dora-robomaster/operators/opencv2_stream.py b/dora-robomaster/operators/opencv2_stream.py new file mode 100644 index 0000000000000000000000000000000000000000..30a6cfee8ba75eb7ea916fd8b3e2ceb4e3902c02 --- /dev/null +++ b/dora-robomaster/operators/opencv2_stream.py @@ -0,0 +1,36 @@ +import ffmpeg +import numpy as np +from dora import Node +import pyarrow as pa +import cv2 + +node = Node() + +in_filename = "tcp://192.168.2.1:40921" +# Global variables, change it to adapt your needs +CAMERA_WIDTH = 960 +CAMERA_HEIGHT = 540 + +process1 = ( + ffmpeg.input(in_filename) + .output("pipe:", format="rawvideo", pix_fmt="rgb24") + .run_async(pipe_stdout=True) +) + +audio = ffmpeg.input(in_filename).audio + +while True: + 
in_bytes = process1.stdout.read(1280 * 720 * 3) + if not in_bytes: + break + in_frame = np.frombuffer(in_bytes, np.uint8).reshape([720, 1280, 3]) + + ## RGB to BGR + in_frame = in_frame[..., ::-1] + + in_frame = cv2.resize(in_frame, (CAMERA_WIDTH, CAMERA_HEIGHT)) + # out_frame = in_frame * 0.5 # do some processing + node.send_output("image", pa.array(in_frame.ravel())) + node.send_output("audio", pa.array(in_frame.ravel())) + +process1.wait() diff --git a/dora-robomaster/operators/opencv_stream.py b/dora-robomaster/operators/opencv_stream.py new file mode 100644 index 0000000000000000000000000000000000000000..900d638980fb6487d514d7bd6b125cf106bbb63f --- /dev/null +++ b/dora-robomaster/operators/opencv_stream.py @@ -0,0 +1,31 @@ +import cv2 +import pyarrow as pa +from dora import Node + +node = Node() +# TCP stream URL (replace with your stream URL) +TCP_STREAM_URL = "tcp://192.168.2.1:40921" +# Global variables, change it to adapt your needs +CAMERA_WIDTH = 960 +CAMERA_HEIGHT = 540 + +# Create a VideoCapture object using the TCP stream URL +cap = cv2.VideoCapture(TCP_STREAM_URL) + +# Check if the VideoCapture object opened successfully +assert cap.isOpened(), "Error: Could not open video capture." + +while True: + # Read a frame from the stream + ret, frame = cap.read() + + if not ret: + break # Break the loop when no more frames are available + frame = cv2.resize(frame, (CAMERA_WIDTH, CAMERA_HEIGHT)) + + node.send_output("image", pa.array(frame.ravel())) + + +# Release the VideoCapture object and any OpenCV windows +cap.release() +cv2.destroyAllWindows() diff --git a/dora-robomaster/operators/planning_op.py b/dora-robomaster/operators/planning_op.py new file mode 100644 index 0000000000000000000000000000000000000000..9749073bc01a2dfdf3112a777964701dc4511750 --- /dev/null +++ b/dora-robomaster/operators/planning_op.py @@ -0,0 +1,57 @@ +import time +import numpy as np +import pyarrow as pa +from dora import DoraStatus + +# front-back and left-right movement(float) with min: -1 and max: 1 +POSITION_GOAL = [0, 0] +# pitch and yaw axis angle in degrees(int) with min: -55 and max: 55 +GIMBAL_GOAL = [0, 0] + +CAMERA_WIDTH = 960 +CAMERA_HEIGHT = 540 + + +class Operator: + def __init__(self): + self.bboxs = np.array([]) + self.time = time.time() + self.position = [0, 0, 0] + + def on_event( + self, + dora_event: dict, + send_output, + ) -> DoraStatus: + global POSITION_GOAL, GIMBAL_GOAL + if dora_event["type"] == "INPUT": + id = dora_event["id"] + if id == "tick": + self.time = time.time() + elif id == "bbox": + value = dora_event["value"].to_numpy() + bboxs = value + self.bboxs = np.reshape( + bboxs, (-1, 6) + ) # min_x, min_y, max_x, max_y, confidence, label + elif id == "position": + value = dora_event["value"].to_numpy() + [x, y, _pitch, _yaw] = value + self.position = [x, y] + direction = np.clip( + np.array(POSITION_GOAL) - np.array(self.position), -1, 1 + ) + + send_output( + "control", + pa.array(direction), + dora_event["metadata"], + ) + + send_output( + "gimbal_control", + pa.array(GIMBAL_GOAL), + dora_event["metadata"], + ) + + return DoraStatus.CONTINUE diff --git a/dora-robomaster/operators/plot.py b/dora-robomaster/operators/plot.py new file mode 100644 index 0000000000000000000000000000000000000000..7750fc9e0ccaaf53276df0f0b215098df5be5e9e --- /dev/null +++ b/dora-robomaster/operators/plot.py @@ -0,0 +1,117 @@ +import os +import cv2 + + +from dora import DoraStatus +from utils import LABELS + + +CI = os.environ.get("CI") + +CAMERA_WIDTH = 960 +CAMERA_HEIGHT = 540 + +FONT = 
cv2.FONT_HERSHEY_SIMPLEX + + +class Operator: + """ + Plot image and bounding box + """ + + def __init__(self): + self.bboxs = [] + self.buffer = "" + self.submitted = [] + self.lines = [] + + def on_event( + self, + dora_event, + send_output, + ): + if dora_event["type"] == "INPUT": + id = dora_event["id"] + value = dora_event["value"] + if id == "image": + + image = ( + value.to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)).copy() + ) + + for bbox in self.bboxs: + [ + min_x, + min_y, + max_x, + max_y, + confidence, + label, + ] = bbox + cv2.rectangle( + image, + (int(min_x), int(min_y)), + (int(max_x), int(max_y)), + (0, 255, 0), + ) + cv2.putText( + image, + f"{LABELS[int(label)]}, {confidence:0.2f}", + (int(max_x), int(max_y)), + FONT, + 0.5, + (0, 255, 0), + ) + + cv2.putText( + image, self.buffer, (20, 14 + 21 * 14), FONT, 0.5, (190, 250, 0), 1 + ) + + i = 0 + for text in self.submitted[::-1]: + color = ( + (0, 255, 190) + if text["role"] == "user_message" + else (0, 190, 255) + ) + cv2.putText( + image, + text["content"], + ( + 20, + 14 + (19 - i) * 14, + ), + FONT, + 0.5, + color, + 1, + ) + i += 1 + + for line in self.lines: + cv2.line( + image, + (int(line[0]), int(line[1])), + (int(line[2]), int(line[3])), + (0, 0, 255), + ) + + if CI != "true": + cv2.imshow("frame", image) + if cv2.waitKey(1) & 0xFF == ord("q"): + return DoraStatus.STOP + elif id == "bbox": + self.bboxs = value.to_numpy().reshape((-1, 6)) + elif id == "keyboard_buffer": + self.buffer = value[0].as_py() + elif id == "line": + self.lines += [value.to_pylist()] + elif "message" in id: + self.submitted += [ + { + "role": id, + "content": value[0].as_py(), + } + ] + + return DoraStatus.CONTINUE diff --git a/dora-robomaster/operators/reaction_op.py b/dora-robomaster/operators/reaction_op.py new file mode 100644 index 0000000000000000000000000000000000000000..105fa344798e1743e61bd9c843101bd2c8163139 --- /dev/null +++ b/dora-robomaster/operators/reaction_op.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +from typing import Callable, Optional, Union + +from time import sleep +from enum import Enum +import numpy as np +import pyarrow as pa +from utils import LABELS +from dora import DoraStatus + +DISTANCE = 2 + + +class Operator: + """ + Infering object from images + """ + + def __init__(self): + self.over = False + self.start = False + + def on_event( + self, + dora_event: dict, + send_output: Callable[[str, Union[bytes, pa.Array], Optional[dict]], None], + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + return self.on_input(dora_event, send_output) + return DoraStatus.CONTINUE + + def on_input( + self, + dora_input: dict, + send_output: Callable[[str, Union[bytes, pa.array], Optional[dict]], None], + ) -> DoraStatus: + if dora_input["id"] == "bbox": + if not self.start: + send_output("led", pa.array([255, 0, 0]), dora_input["metadata"]) + self.start = True + bboxs = dora_input["value"].to_numpy() + bboxs = np.reshape(bboxs, (-1, 6)) + bottle = False + laser = False + obstacle = False + for bbox in bboxs: + box = True + [ + min_x, + min_y, + max_x, + max_y, + confidence, + label, + ] = bbox + + if ( + (min_x + max_x) / 2 > 240 + and (min_x + max_x) / 2 < 400 + and LABELS[int(label)] == "cup" + ): + laser = True + if ( + (min_x + max_x) / 2 > 240 + and (min_x + max_x) / 2 < 400 + and LABELS[int(label)] == "bottle" + ): + bottle = True + + if LABELS[int(label)] != "ABC" and not obstacle: + obstacle = True + if laser: + send_output("blaster", pa.array([128]), dora_input["metadata"]) + 
else: + send_output("blaster", pa.array([0]), dora_input["metadata"]) + if bottle: + send_output("led", pa.array([0, 0, 255]), dora_input["metadata"]) + elif obstacle: + send_output("led", pa.array([0, 255, 0]), dora_input["metadata"]) + else: + send_output("led", pa.array([0, 0, 0]), dora_input["metadata"]) + obstacle = False + bottle = False + laser = False + return DoraStatus.CONTINUE diff --git a/dora-robomaster/operators/robot.py b/dora-robomaster/operators/robot.py new file mode 100644 index 0000000000000000000000000000000000000000..5107e7c48c141f1f3d4550b6bf4da374c11d2edc --- /dev/null +++ b/dora-robomaster/operators/robot.py @@ -0,0 +1,97 @@ +from robomaster import robot, blaster, led +from typing import Callable, Optional, Union + +import numpy as np +import pyarrow as pa + +from dora import DoraStatus + +CONN = "ap" + + +class Operator: + def __init__(self): + self.ep_robot = robot.Robot() + print("Initializing robot...") + assert self.ep_robot.initialize(conn_type=CONN), "Could not initialize ep_robot" + assert self.ep_robot.camera.start_video_stream( + display=False + ), "Could not start video stream" + + self.ep_robot.gimbal.recenter().wait_for_completed() + self.position = [0, 0] + self.gimbal_position = [0, 0] + self.event = None + self.brightness = 0 + self.rgb = [0, 0, 0] + + def on_event( + self, + dora_event: str, + send_output: Callable[[str, Union[bytes, pa.UInt8Array], Optional[dict]], None], + ) -> DoraStatus: + event_type = dora_event["type"] + if event_type == "INPUT": + if dora_event["id"] == "tick": + send_output( + "position", + pa.array(self.position + self.gimbal_position), + dora_event["metadata"], + ) + + elif dora_event["id"] == "control": + if not ( + self.event is not None + and not (self.event._event.isSet() and self.event.is_completed) + ): + [x, y] = dora_event["value"].to_numpy() + + if any(abs(np.array([x, y])) > 0.1): + print("received command: ", x, y, flush=True) + self.event = self.ep_robot.chassis.move( + x=x, y=y, z=0.0, xy_speed=0.8, z_speed=0.8 + ) + self.position[0] += x + self.position[1] += y + else: + print("control not completed", flush=True) + print("Set: ", self.event._event.isSet(), flush=True) + print("Completed:", self.event.is_completed, flush=True) + + elif dora_event["id"] == "gimbal_control": + if not ( + self.event is not None + and not (self.event._event.isSet() and self.event.is_completed) + ): + [ + gimbal_pitch, + gimbal_yaw, + ] = dora_event["value"].to_numpy() + + if self.gimbal_position != [gimbal_pitch, gimbal_yaw]: + self.event = self.ep_robot.gimbal.moveto( + pitch=gimbal_pitch, + yaw=gimbal_yaw, + pitch_speed=50, + yaw_speed=50, + ) + self.gimbal_position[0] = gimbal_pitch + self.gimbal_position[1] = gimbal_yaw + + elif dora_event["id"] == "blaster": + [brightness] = dora_event["value"].to_numpy() + if brightness != self.brightness: + self.ep_robot.blaster.set_led( + brightness=brightness, effect=blaster.LED_ON + ) + self.brightness = brightness + elif dora_event["id"] == "led": + [r, g, b] = dora_event["value"].to_numpy() + rgb = [r, g, b] + if rgb != self.rgb: + self.ep_robot.led.set_led( + comp=led.COMP_ALL, r=r, g=g, b=b, effect=led.EFFECT_ON + ) + self.rgb = rgb + + return DoraStatus.CONTINUE diff --git a/dora-robomaster/operators/sentence_transformers_op.py b/dora-robomaster/operators/sentence_transformers_op.py new file mode 100644 index 0000000000000000000000000000000000000000..ad5c673748f722776e117bb80c8deae0e8c0c491 --- /dev/null +++ b/dora-robomaster/operators/sentence_transformers_op.py @@ -0,0 +1,101 @@ 
+from sentence_transformers import SentenceTransformer +from sentence_transformers import util + +from dora import DoraStatus +import os +import sys +import inspect +import torch +import pyarrow as pa + +SHOULD_NOT_BE_INCLUDED = [ + "utils.py", + "sentence_transformers_op.py", + "chatgpt_op.py", + "llm_op.py", +] + +SHOULD_BE_INCLUDED = [ + "webcam.py", + "object_detection.py", + "planning_op.py", + "plot.py", +] + + +## Get all python files path in given directory +def get_all_functions(path): + raw = [] + paths = [] + for root, dirs, files in os.walk(path): + for file in files: + if file.endswith(".py"): + if file not in SHOULD_BE_INCLUDED: + continue + path = os.path.join(root, file) + with open(path, "r", encoding="utf8") as f: + ## add file folder to system path + sys.path.append(root) + ## import module from path + raw.append(f.read()) + paths.append(path) + + return raw, paths + + +def search(query_embedding, corpus_embeddings, paths, raw, k=5, file_extension=None): + cos_scores = util.cos_sim(query_embedding, corpus_embeddings)[0] + top_results = torch.topk(cos_scores, k=min(k, len(cos_scores)), sorted=True) + out = [] + for score, idx in zip(top_results[0], top_results[1]): + out.extend([raw[idx], paths[idx], score]) + return out + + +class Operator: + """ """ + + def __init__(self): + ## TODO: Add a initialisation step + self.model = SentenceTransformer("BAAI/bge-large-en-v1.5") + self.encoding = [] + # file directory + path = os.path.dirname(os.path.abspath(__file__)) + + self.raw, self.path = get_all_functions(path) + # Encode all files + self.encoding = self.model.encode(self.raw) + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + if dora_event["id"] == "query": + values = dora_event["value"].to_pylist() + + query_embeddings = self.model.encode(values) + output = search( + query_embeddings, + self.encoding, + self.path, + self.raw, + ) + [raw, path, score] = output[0:3] + send_output( + "raw_file", + pa.array([{"raw": raw, "path": path, "user_message": values[0]}]), + dora_event["metadata"], + ) + else: + input = dora_event["value"][0].as_py() + index = self.path.index(input["path"]) + self.raw[index] = input["raw"] + self.encoding[index] = self.model.encode([input["raw"]])[0] + + return DoraStatus.CONTINUE + + +if __name__ == "__main__": + operator = Operator() diff --git a/dora-robomaster/operators/utils.py b/dora-robomaster/operators/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dabc915e1935814866d5758911ad426ac9419ee1 --- /dev/null +++ b/dora-robomaster/operators/utils.py @@ -0,0 +1,82 @@ +LABELS = [ + "ABC", + "bicycle", + "car", + "motorcycle", + "airplane", + "bus", + "train", + "truck", + "boat", + "traffic light", + "fire hydrant", + "stop sign", + "parking meter", + "bench", + "bird", + "cat", + "dog", + "horse", + "sheep", + "cow", + "elephant", + "bear", + "zebra", + "giraffe", + "backpack", + "umbrella", + "handbag", + "tie", + "suitcase", + "frisbee", + "skis", + "snowboard", + "sports ball", + "kite", + "baseball bat", + "baseball glove", + "skateboard", + "surfboard", + "tennis racket", + "bottle", + "wine glass", + "cup", + "fork", + "knife", + "spoon", + "bowl", + "banana", + "apple", + "sandwich", + "orange", + "broccoli", + "carrot", + "hot dog", + "pizza", + "donut", + "cake", + "chair", + "couch", + "potted plant", + "bed", + "dining table", + "toilet", + "tv", + "laptop", + "mouse", + "remote", + "keyboard", + "cell phone", + "microwave", + "oven", + "toaster", + 
"sink", + "refrigerator", + "book", + "clock", + "vase", + "scissors", + "teddy bear", + "hair drier", + "toothbrush", +] diff --git a/dora-robomaster/operators/webcam.py b/dora-robomaster/operators/webcam.py new file mode 100644 index 0000000000000000000000000000000000000000..9c722f816039b510cc53e5ba8c22c6f69d62da53 --- /dev/null +++ b/dora-robomaster/operators/webcam.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import os +import time + +import cv2 +import numpy as np +import pyarrow as pa + +from dora import DoraStatus + +CI = os.environ.get("CI") + +CAMERA_WIDTH = 960 +CAMERA_HEIGHT = 540 +CAMERA_INDEX = int(os.getenv("CAMERA_INDEX", 2)) + +font = cv2.FONT_HERSHEY_SIMPLEX + + +class Operator: + """ + Sending image from webcam to the dataflow + """ + + def __init__(self): + self.video_capture = cv2.VideoCapture(CAMERA_INDEX) + self.start_time = time.time() + self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH) + self.video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT) + + def on_event( + self, + dora_event: str, + send_output, + ) -> DoraStatus: + event_type = dora_event["type"] + if event_type == "INPUT": + ret, frame = self.video_capture.read() + if ret: + frame = cv2.resize(frame, (CAMERA_WIDTH, CAMERA_HEIGHT)) + + ## Push an error image in case the camera is not available. + else: + frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8) + cv2.putText( + frame, + "No Webcam was found at index %d" % (CAMERA_INDEX), + (int(30), int(30)), + font, + 0.75, + (255, 255, 255), + 2, + 1, + ) + if CI != "true": + return DoraStatus.CONTINUE + + send_output( + "image", + pa.array(frame.ravel()), + dora_event["metadata"], + ) + elif event_type == "STOP": + print("received stop") + else: + print("received unexpected event:", event_type) + + if time.time() - self.start_time < 10000: + return DoraStatus.CONTINUE + else: + return DoraStatus.STOP + + def __del__(self): + self.video_capture.release() + + +if __name__ == "__main__": + op = Operator() + op.on_event( + {"type": "INPUT", "id": "tick", "value": pa.array([0]), "metadata": []}, print + ) diff --git a/dora-robomaster/operators/whisper_op.py b/dora-robomaster/operators/whisper_op.py new file mode 100644 index 0000000000000000000000000000000000000000..feab8b92e8d8018fe7901eaa5eee9f81f6c0817d --- /dev/null +++ b/dora-robomaster/operators/whisper_op.py @@ -0,0 +1,25 @@ +import pyarrow as pa +import whisper + +from dora import DoraStatus + + +model = whisper.load_model("base") + + +class Operator: + """ + Transforming Speech to Text using OpenAI Whisper model + """ + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + audio = dora_event["value"].to_numpy() + audio = whisper.pad_or_trim(audio) + result = model.transcribe(audio, language="en") + send_output("text", pa.array([result["text"]]), dora_event["metadata"]) + return DoraStatus.CONTINUE diff --git a/dora-robomaster/requirements.txt b/dora-robomaster/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f262ce8a7e851d95db5b675b80847e82eb9834be --- /dev/null +++ b/dora-robomaster/requirements.txt @@ -0,0 +1,58 @@ +# Require Python 3.8 +robomaster +dora-rs +torch +torchvision +torchaudio +opencv-python +# YOLOv5 requirements +# Usage: pip install -r requirements.txt + +# Base ---------------------------------------- +ultralytics +matplotlib>=3.2.2 +numpy>=1.18.5 +Pillow>=7.1.2 +PyYAML>=5.3.1 +requests>=2.23.0 +scipy>=1.4.1 +--extra-index-url 
https://download.pytorch.org/whl/cpu +tqdm>=4.64.0 +protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 + +# Logging ------------------------------------- +tensorboard>=2.4.1 +# wandb +# clearml + +# Plotting ------------------------------------ +pandas>=1.1.4 +seaborn>=0.11.0 + +# Export -------------------------------------- +# coremltools>=5.2 # CoreML export +# onnx>=1.9.0 # ONNX export +# onnx-simplifier>=0.4.1 # ONNX simplifier +# nvidia-pyindex # TensorRT export +# nvidia-tensorrt # TensorRT export +# scikit-learn==0.19.2 # CoreML quantization +# tensorflow>=2.4.1 # TFLite export (or tensorflow-cpu, tensorflow-aarch64) +# tensorflowjs>=3.9.0 # TF.js export +# openvino-dev # OpenVINO export + +# Extras -------------------------------------- +ipython # interactive notebook +psutil # system utilization +thop>=0.1.1 # FLOPs computation +# albumentations>=1.0.3 +# pycocotools>=2.0 # COCO mAP +# roboflow + +opencv-python>=4.1.1 +pyarrow +maturin + +sounddevice +openai-whisper +sentence-transformers +pynput \ No newline at end of file diff --git a/dora-robomaster/s1_SDK/dji.json b/dora-robomaster/s1_SDK/dji.json new file mode 100644 index 0000000000000000000000000000000000000000..01caa88bf12c66cf6207f7f1bee761534e3cb89c --- /dev/null +++ b/dora-robomaster/s1_SDK/dji.json @@ -0,0 +1,1500 @@ +{ + "log": { + "default_level": "info", + "default_channel": "console", + "default_mask": "enable_all", + "default_format": [ + "basic" + ] + }, + "blackbox": { + "active" : 2, + "base_dir" : "/blackbox/misc", + "prefix" : "", + "demuxed" : 0, + "file_num_limit" : 100, + "file_max_size_m" : 2, + "space_limit_m" : 16, + "cache_size_m" : 1, + "output_port" : 8919, + + "channels": [ + {"name": "duss_file_id", "active": 0, "id": 0, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "", "file_num_limit" : 10, "file_max_size_m" : 2, "space_limit_m" : 16, "self_inc_index" : 1}, + {"name": "duss_osal_info", "active": 0, "id": 1, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "", "file_num_limit" : 10, "file_max_size_m" : 2, "space_limit_m" : 16, "self_inc_index" : 1}, + {"name": "duss_tm_msg", "active": 0, "id": 3, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "", "file_num_limit" : 10, "file_max_size_m" : 2, "space_limit_m" : 16, "self_inc_index" : 1}, + {"name": "duss_data_info", "active": 0, "id": 5, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "", "file_num_limit" : 10, "file_max_size_m" : 2, "space_limit_m" : 16, "self_inc_index" : 1}, + {"name": "gimbal", "active": 1, "id": 7, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "GIMBAL", "file_num_limit" : 20, "file_max_size_m" : 30, "space_limit_m" : 100, "self_inc_index" : 1}, + {"name": "armor1", "active": 1, "id": 9, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "ARMOR1", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 30, "self_inc_index" : 1}, + {"name": "armor2", "active": 1, "id": 11, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "ARMOR2", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 30, "self_inc_index" : 1}, + {"name": "armor3", "active": 1, "id": 2, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "ARMOR3", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 30, "self_inc_index" : 1}, + {"name": "armor4", "active": 1, "id": 4, 
"checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "ARMOR4", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 30, "self_inc_index" : 1}, + {"name": "armor5", "active": 1, "id": 6, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "ARMOR5", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 30, "self_inc_index" : 1}, + {"name": "armor6", "active": 1, "id": 8, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "ARMOR6", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 30, "self_inc_index" : 1}, + {"name": "gamesystem", "active": 1, "id": 10, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "GAMESYSTEM", "file_num_limit" : 20, "file_max_size_m" : 30, "space_limit_m" : 100, "self_inc_index" : 1}, + {"name": "gun", "active": 1, "id": 12, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "GUN", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 100, "self_inc_index" : 1}, + {"name": "camera", "active": 1, "id": 14, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "CAMERA", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 50, "self_inc_index" : 1}, + {"name": "vision", "active": 1, "id": 13, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "VISION", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 100, "self_inc_index" : 1}, + {"name": "scratch_sys", "active": 1, "id": 15, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "SCRATCH_SYS", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 100, "self_inc_index" : 1}, + {"name": "scratch_script", "active": 1, "id": 16, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "SCRATCH_SCRIPT", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 100, "self_inc_index" : 1}, + {"name": "chassis", "active": 1, "id": 17, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "CHASSIS", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 100, "self_inc_index" : 1}, + {"name": "network", "active": 1, "id": 18, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "NETWORK", "file_num_limit" : 20, "file_max_size_m" : 30, "space_limit_m" : 100, "self_inc_index" : 1}, + {"name": "dji_system", "active": 1, "id": 19, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "SYSTEM", "file_num_limit" : 20, "file_max_size_m" : 30, "space_limit_m" : 100, "self_inc_index" : 1}, + {"name": "stick", "active": 1, "id": 20, "checksum": "no", "high_priority": 0, "sync": 0, "dma": 0, "shm": 0, "prefix": "STICK", "file_num_limit" : 20, "file_max_size_m" : 10, "space_limit_m" : 200, "self_inc_index" : 1} + ] + }, + "platform": { + "name": "common", + "pairing_gpio": 185, + "dev_info": "xw0607edu robot", + "hw_str": "xw0607edu robot", + "system_init": "sys_platform_xw607_init" + }, + "hal": { + "device_list": [ + {"name": "/dev/video0", "attach": "duss_hal_attach_p1_v4l2", "detach": "duss_hal_detach_p1_v4l2" }, + {"name": "/dev/ienc0", "attach": "duss_hal_attach_p1_ienc", "detach": "duss_hal_detach_p1_ienc" }, + {"name": "/dev/iencv", "attach": "duss_hal_attach_p1_ienc", "detach": "duss_hal_detach_p1_ienc" }, + {"name": "/dev/venc0", "attach": "duss_hal_attach_p1_venc", "detach": "duss_hal_detach_p1_venc" }, + {"name": 
"/dev/venc1", "attach": "duss_hal_attach_p1_venc", "detach": "duss_hal_detach_p1_venc" }, + {"name": "/dev/venc2", "attach": "duss_hal_attach_p1_venc", "detach": "duss_hal_detach_p1_venc" }, + {"name": "/dev/venc3", "attach": "duss_hal_attach_p1_venc", "detach": "duss_hal_detach_p1_venc" }, + {"name": "/dev/vencvf", "attach": "duss_hal_attach_p1_venc", "detach": "duss_hal_detach_p1_venc" }, + {"name": "/dev/vencvb", "attach": "duss_hal_attach_p1_venc", "detach": "duss_hal_detach_p1_venc" }, + {"name": "/dev/vdec0", "attach": "duss_hal_attach_p1_vdec", "detach": "duss_hal_detach_p1_vdec" }, + {"name": "/dev/vmem", "attach": "duss_hal_attach_p1_vmem", "detach": "duss_hal_detach_p1_vmem" }, + {"name": "/dev/vmem_cache", "attach": "duss_hal_attach_p1_vmem", "detach": "duss_hal_detach_p1_vmem" }, + {"name": "/dev/omem", "attach": "duss_hal_attach_p1_vmem", "detach": "duss_hal_detach_p1_vmem" }, + {"name": "/dev/o2d", "attach": "duss_hal_attach_p1_o2d", "detach": "duss_hal_detach_p1_o2d" }, + {"name": "/dev/o2d2", "attach": "duss_hal_attach_p1_o2d", "detach": "duss_hal_detach_p1_o2d" }, + {"name": "/dev/v2d", "attach": "duss_hal_attach_p1_v2d", "detach": "duss_hal_detach_p1_v2d" }, + {"name": "/dev/ttyS0", "attach": "duss_hal_attach_linux_tty", "detach": "duss_hal_detach_linux_tty" }, + {"name": "/dev/ttyS1", "attach": "duss_hal_attach_linux_tty", "detach": "duss_hal_detach_linux_tty" }, + {"name": "/dev/ttyS2", "attach": "duss_hal_attach_linux_tty", "detach": "duss_hal_detach_linux_tty" }, + {"name": "/dev/ttyS3", "attach": "duss_hal_attach_linux_tty", "detach": "duss_hal_detach_linux_tty" }, + {"name": "/dev/ttyACM:v4255p0052", "attach": "duss_hal_attach_linux_tty", "detach": "duss_hal_detach_linux_tty" }, + {"name": "/dev/ttyACM:vFFF0p0008", "attach": "duss_hal_attach_linux_tty", "detach": "duss_hal_detach_linux_tty" }, + {"name": "/dev/ttyACM0", "attach": "duss_hal_attach_linux_tty", "detach": "duss_hal_detach_linux_tty" }, + {"name": "/dev/ttyACM1", "attach": "duss_hal_attach_linux_tty", "detach": "duss_hal_detach_linux_tty" }, + {"name": "/dev/ttyACM2", "attach": "duss_hal_attach_linux_tty", "detach": "duss_hal_detach_linux_tty" }, + {"name": "/dev/ttyACM3", "attach": "duss_hal_attach_linux_tty", "detach": "duss_hal_detach_linux_tty" }, + {"name": "/dev/i2c-0", "attach": "duss_hal_attach_linux_i2c", "detach": "duss_hal_detach_linux_i2c" }, + {"name": "/dev/i2c-1", "attach": "duss_hal_attach_linux_i2c", "detach": "duss_hal_detach_linux_i2c" }, + {"name": "/dev/i2c-4", "attach": "duss_hal_attach_linux_i2c", "detach": "duss_hal_detach_linux_i2c" }, + {"name": "/dev/spidev0.0", "attach": "duss_hal_attach_linux_spi", "detach": "duss_hal_detach_linux_spi" }, + {"name": "/dev/spidev1.0", "attach": "duss_hal_attach_linux_spi", "detach": "duss_hal_detach_linux_spi" }, + {"name": "/sys/class/gpio", "attach": "duss_hal_attach_linux_gpio", "detach": "duss_hal_detach_linux_gpio" }, + {"name": "/dev/speaker0", "attach": "duss_hal_attach_speaker", "detach": "duss_hal_detach_speaker" }, + {"name": "/dev/ttyGS0", "attach": "duss_hal_attach_linux_tty", "detach": "duss_hal_detach_linux_tty" } + ] + }, + "process": { + "dji_monitor": { + "log": { + "default_level": "error", + "default_channel": "android_log", + "default_mask": "enable_all", + "default_format": [ + "basic" + ] + }, + "blackbox": { + "active" : 3, + "base_dir" : "/blackbox/dji_monitor", + "prefix" : "", + "demuxed" : 1, + "file_num_limit" : 10, + "file_max_size_m" : 1, + "space_limit_m" : 8, + "cache_size_m" : 1, + "output_port" : 8905 + } + }, 
+ "dji_sys": { + "log": { + "default_level": "info", + "default_channel": "android_log", + "default_mask": "enable_all", + "default_module": [ + {"upgrade": "info"} + ], + "default_format": [ + "basic" + ] + }, + "blackbox": { + "active" : 3, + "base_dir" : "/blackbox/dji_system", + "prefix" : "", + "demuxed" : 1, + "file_num_limit" : 10, + "file_max_size_m" : 2, + "space_limit_m" : 16, + "cache_size_m" : 1, + "output_port" : 8906 + } + }, + "dji_camera": { + "log": { + "default_level": "error", + "default_channel": "android_log", + "default_mask": "enable_all", + "default_module": [ + {"network": "info"} + ], + "default_format": [ + "basic" + ] + }, + "blackbox": { + "active" : 3, + "base_dir" : "/blackbox/dji_camera", + "prefix" : "", + "demuxed" : 1, + "file_num_limit" : 10, + "file_max_size_m" : 2, + "space_limit_m" : 16, + "cache_size_m" : 1, + "output_port" : 8907 + } + }, + "dji_vision": { + "log": { + "default_level": "error", + "default_channel": "android_log", + "default_mask": "enable_all", + "default_format": [ + "basic" + ] + }, + "blackbox": { + "active" : 3, + "base_dir" : "/blackbox/dji_vision", + "prefix" : "", + "demuxed" : 1, + "file_num_limit" : 10, + "file_max_size_m" : 16, + "space_limit_m" : 512, + "cache_size_m" : 1, + "output_port" : 8909 + } + }, + "dji_hdvt_uav": { + "log": { + "default_level": "error", + "default_channel": "android_log", + "default_mask": "enable_all", + "default_module": [ + {"upgrade": "info"}, + {"network": "info"} + ], + "default_format": [ + "basic" + ] + }, + "blackbox": { + "active" : 3, + "base_dir" : "/blackbox/dji_hdvt_uav", + "prefix" : "", + "demuxed" : 1, + "file_num_limit" : 10, + "file_max_size_m" : 2, + "space_limit_m" : 16, + "cache_size_m" : 1, + "output_port" : 8910 + } + }, + "dji_mb_ctrl": { + "log": { + "default_level": "warning", + "default_channel": "android_log", + "default_mask": "enable_all", + "default_format": [ + "basic" + ] + }, + "blackbox": { + "active" : 3, + "base_dir" : "/blackbox/dji_mb_ctrl", + "prefix" : "", + "demuxed" : 1, + "file_num_limit" : 10, + "file_max_size_m" : 1, + "space_limit_m" : 4, + "cache_size_m" : 1, + "output_port" : 8911 + } + }, + "dji_network": { + "log": { + "default_level": "warning", + "default_channel": "android_log", + "default_mask": "enable_all", + "default_format": [ + "basic" + ] + }, + "blackbox": { + "active" : 3, + "base_dir" : "/blackbox/dji_network", + "prefix" : "", + "demuxed" : 1, + "file_num_limit" : 10, + "file_max_size_m" : 2, + "space_limit_m" : 16, + "cache_size_m" : 1, + "output_port" : 8912 + } + }, + "dji_sw_uav": { + "log": { + "default_level": "warning", + "default_channel": "android_log", + "default_mask": "enable_all", + "default_format": [ + "basic" + ] + }, + "blackbox": { + "active" : 3, + "base_dir" : "/blackbox/dji_sw_uav_monitor", + "prefix" : "", + "demuxed" : 1, + "file_num_limit" : 10, + "file_max_size_m" : 2, + "space_limit_m" : 16, + "cache_size_m" : 1, + "output_port" : 8913 + } + }, + "sdrs": { + "log": { + "default_level": "warning", + "default_channel": "android_log", + "default_mask": "enable_all", + "default_format": [ + "basic" + ] + }, + "blackbox": { + "active" : 3, + "base_dir" : "/blackbox/sdrs", + "prefix" : "", + "demuxed" : 1, + "file_num_limit" : 10, + "file_max_size_m" : 1, + "space_limit_m" : 4, + "cache_size_m" : 1, + "output_port" : 8914 + } + }, + "sdrs_log": { + "log": { + "default_level": "warning", + "default_channel": "android_log", + "default_mask": "enable_all", + "default_format": [ + "basic" + ] + }, + "blackbox": { + 
"active" : 3, + "base_dir" : "/blackbox/sdrs_log", + "prefix" : "", + "demuxed" : 1, + "file_num_limit" : 10, + "file_max_size_m" : 1, + "space_limit_m" : 4, + "cache_size_m" : 1, + "output_port" : 8915 + } + }, + "dji_blackbox": { + "log": { + "default_level": "warning", + "default_channel": "android_log", + "default_mask": "enable_all", + "default_format": [ + "basic" + ] + }, + "blackbox": { + "active" : 3, + "base_dir" : "/blackbox", + "prefix" : "FLY", + "demuxed" : 1, + "file_num_limit" : 10, + "file_max_size_m" : 700, + "space_limit_m" : 2200, + "cache_size_m" : 16, + "output_port" : 8916 + } + }, + "dji_perception": { + "log": { + "default_level": "warning", + "default_channel": "android_log", + "default_mask": "enable_all", + "default_format": [ + "basic" + ] + }, + "blackbox": { + "active" : 3, + "base_dir" : "/blackbox/dji_perception", + "prefix" : "", + "demuxed" : 1, + "file_num_limit" : 10, + "file_max_size_m" : 16, + "space_limit_m" : 512, + "cache_size_m" : 1, + "output_port" : 8925 + } + } + }, + "vtair_service": { + "mb_route_table": { + "vt_sdr_fc": { + "host": "vt_air", "index": 0, + "1": {"status": 1, "target": "camera", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "camera", "index": 0}}, + "2": {"status": 1, "target": "vt_gnd", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "3": {"status": 1, "target": "mobile", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "4": {"status": 1, "target": "glass", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "5": {"status": 1, "target": "rc", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "6": {"status": 1, "target": "ve_gnd", "index": 1, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "7": {"status": 1, "target": "vt_gnd", "index": 1, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "8": {"status": 1, "target": "ve_gnd", "index": 7, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 1, "server": true, "protocol": "dmp_v2"}}, + "9": {"status": 1, "target": "vt_gnd", "index": 7, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 1, "server": true, "protocol": "dmp_v2"}}, + "10": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "11": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}}, + "12": {"status": 1, "target": "test", 
"index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "test", "index": 0}}, + "13": {"status": 1, "target": "pc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "14": {"status": 1, "target": "pc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "15": {"status": 1, "target": "pc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "16": {"status": 1, "target": "flight", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "17": {"status": 1, "target": "flight", "index": 5, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "18": {"status": 1, "target": "flight", "index": 6, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "19": {"status": 1, "target": "battery", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity":0, "stopbit": 0, "wordlen": 8}}, + "20": {"status": 1, "target": "esc", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "21": {"status": 1, "target": "esc", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "22": {"status": 1, "target": "esc", "index": 2, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "23": {"status": 1, "target": "esc", "index": 3, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "24": {"status": 1, "target": "armor", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "25": {"status": 1, "target": "armor", "index": 2, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "26": {"status": 1, "target": "armor", "index": 3, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "27": {"status": 1, "target": "armor", "index": 4, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "28": {"status": 1, "target": "armor", "index": 5, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "29": {"status": 1, "target": "armor", "index": 6, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "30": {"status": 1, "target": "gun", "index": 0, "channel": "uart", "distance": 0, 
"protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "31": {"status": 1, "target": "gimbal", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "32": {"status": 1, "target": "gimbal", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "33": {"status": 1, "target": "vt_air", "index": 2, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "34": {"status": 1, "target": "blackbox", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "blackbox", "index": 0}}, + "35": {"status": 1, "target": "armor", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "36": {"status": 1, "target": "robotic_part", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "37": {"status": 1, "target": "robotic_part", "index": 2, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "38": {"status": 1, "target": "robotic_part", "index": 3, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "39": {"status": 1, "target": "robotic_part", "index": 4, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "40": {"status": 1, "target": "robotic_part", "index": 5, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "41": {"status": 1, "target": "robotic_part", "index": 6, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "42": {"status": 1, "target": "robotic_part", "index": 7, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "43": {"status": 1, "target": "vt_air", "index": 5, "channel": "local", "distance": 0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 5}}, + "44": {"status": 1, "target": "vt_air", "index": 6, "channel": "local", "distance": 0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 6}}, + "45": {"status": 1, "target": "ve_air", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "46": {"status": 1, "target": "gun", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}} + }, + "vt_sdr_mdl": { + "host": "vt_air", "index": 0, + "1": {"status": 1, "target": "camera", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "camera", "index": 0}}, + "2": {"status": 1, "target": "flight", "index": 0, 
"channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "3": {"status": 1, "target": "flight", "index": 5, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "4": {"status": 1, "target": "flight", "index": 6, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "5": {"status": 1, "target": "mvision", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "mvision", "index": 7}}, + "6": {"status": 0, "target": "vt_gnd", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "7": {"status": 0, "target": "mobile", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "8": {"status": 0, "target": "glass", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "9": {"status": 0, "target": "rc", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "10": {"status": 0, "target": "ve_gnd", "index": 1, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "11": {"status": 0, "target": "ve_gnd", "index": 1, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "12": {"status": 0, "target": "ve_gnd", "index": 7, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "13": {"status": 0, "target": "vt_gnd", "index": 7, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "14": {"status": 0, "target": "glass", "index": 7, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "dmp_v2"}}, + "15": {"status": 0, "target": "mobile", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 0}}, + "16": {"status": 1, "target": "ve_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 0}}, + "17": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "18": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}}, + "19": {"status": 1, "target": 
"vt_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 7}}, + "20": {"status": 1, "target": "network", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "network", "index": 0}}, + "21": {"status": 1, "target": "all", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "all", "index": 0}}, + "22": {"status": 1, "target": "pc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "23": {"status": 1, "target": "pc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "24": {"status": 1, "target": "pc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "25": {"status": 1, "target": "test", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "test", "index": 0}}, + "26": {"status": 1, "target": "battery", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity":0, "stopbit": 0, "wordlen": 8}}, + "27": {"status": 1, "target": "esc", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "28": {"status": 1, "target": "esc", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "29": {"status": 1, "target": "esc", "index": 2, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "30": {"status": 1, "target": "esc", "index": 3, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "31": {"status": 1, "target": "mobile", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 10607, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, + "protocol": "sw_v2_proto", "priority": "HIGHEST", "flags": 1}}, + "32": {"status": 1, "target": "mobile", "index": 7, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 10607, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, + "protocol": "sw_v2_proto", "priority": "HIGHEST", "flags": 1}}, + "33": {"status": 1, "target": "armor", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "34": {"status": 1, "target": "armor", "index": 2, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "35": {"status": 1, "target": "armor", "index": 3, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "36": {"status": 1, "target": "armor", "index": 4, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "37": {"status": 1, "target": "armor", "index": 5, 
"channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "38": {"status": 1, "target": "armor", "index": 6, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "39": {"status": 1, "target": "gun", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "40": {"status": 1, "target": "gimbal", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "41": {"status": 1, "target": "vt_air", "index": 2, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "42": {"status": 1, "target": "gimbal", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "43": {"status": 1, "target": "vt_air", "index": 5, "channel": "local", "distance": 0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 5}}, + "44": {"status": 1, "target": "vt_air", "index": 6, "channel": "local", "distance": 0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 6}}, + "45": {"status": 1, "target": "blackbox", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "blackbox", "index": 0}}, + "46": {"status": 1, "target": "armor", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "47": {"status": 1, "target": "robotic_part", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "48": {"status": 1, "target": "robotic_part", "index": 2, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "49": {"status": 1, "target": "robotic_part", "index": 3, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "50": {"status": 1, "target": "ve_air", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "51": {"status": 1, "target": "robotic_part", "index": 4, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "52": {"status": 1, "target": "robotic_part", "index": 5, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "53": {"status": 1, "target": "robotic_part", "index": 6, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "54": {"status": 1, "target": "robotic_part", "index": 7, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "55": {"status": 1, "target": 
"gun", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "56": {"status": 1, "target": "pin_bd", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "57": {"status": 1, "target": "pin_bd", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "58": {"status": 1, "target": "pin_bd", "index": 2, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "59": {"status": 1, "target": "pin_bd", "index": 3, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "60": {"status": 1, "target": "pin_bd", "index": 4, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "61": {"status": 1, "target": "pin_bd", "index": 5, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "62": {"status": 1, "target": "pin_bd", "index": 6, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "63": {"status": 1, "target": "pin_bd", "index": 7, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "64": {"status": 1, "target": "pin_bd", "index": 6, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "65": {"status": 1, "target": "servo", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "66": {"status": 1, "target": "servo", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "67": {"status": 1, "target": "servo", "index": 2, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "68": {"status": 1, "target": "servo", "index": 3, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "69": {"status": 1, "target": "servo", "index": 4, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "70": {"status": 1, "target": "servo", "index": 5, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "71": {"status": 1, "target": "servo", "index": 6, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "72": {"status": 1, "target": "servo", "index": 7, "channel": "uart", 
"distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "73": {"status": 1, "target": "bvision", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "74": {"status": 1, "target": "bvision", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "75": {"status": 1, "target": "bvision", "index": 2, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "76": {"status": 1, "target": "bvision", "index": 3, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "77": {"status": 1, "target": "bvision", "index": 4, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyS3", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "78": {"status": 0, "target": "mobile", "index": 0, "channel": "ip", "distance": 0, "protocol": "v1", + "ip": {"interface": "rndis0", "local_port": 20020, "remote_address": "192.168.42.3", "remote_port": 20010}}, + "79": {"status": 0, "target": "mobile", "index": 0, "channel": "ip", "distance": 0, "protocol": "v1", + "ip": {"interface": "wlan0", "local_port": 20020, "remote_address": "192.168.2.22", "remote_port": 20010, "server": true, "protocol": "udp"}}, + "80": {"status": 0, "target": "vt_air", "index": 6, "channel": "ip", "distance": 0, "protocol": "v1", + "ip": {"interface": "rndis0", "local_port": 20020, "remote_address": "192.168.42.3", "remote_port": 20010}}, + "81": {"status": 0, "target": "vt_air", "index": 6, "channel": "ip", "distance": 0, "protocol": "v1", + "ip": {"interface": "wlan0", "local_port": 20020, "remote_address": "192.168.2.22", "remote_port": 20010, "server": true, "protocol": "udp"}}, + "82": {"status": 0, "target": "vt_air", "index": 6, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 20020, "remote_address": "0.0.0.0", "remote_port": 20010, "server": true, + "protocol": "tcp", "priority": "HIGHEST", "flags": 1}} + } + } + }, + "camera_service": { + "mb_route_table": { + "camera": { + "host": "camera", "index": 0, + "1": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "2": {"status": 0, "target": "mobile", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "3": {"status": 1, "target": "test", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "4": {"status": 1, "target": "mvision", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "mvision", "index": 7}}, + "5": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "6": {"status": 1, "target": "pc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "7": {"status": 1, "target": "network", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + 
"local": {"direct_host": "network", "index": 0}}, + "8": {"status": 1, "target": "pc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "9": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}}, + "10": {"status": 0, "target": "mobile", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 10607, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, + "protocol": "sw_proto", "priority": "HIGHEST", "flags": 1}}, + "11": {"status": 1, "target": "gimbal", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "12": {"status": 1, "target": "flight", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "13": {"status": 1, "target": "battery", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "14": {"status": 1, "target": "vt_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 7}}, + "16": {"status": 1, "target": "mobile", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "17": {"status": 1, "target": "vt_gnd", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "18": {"status": 1, "target": "ve_gnd", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "19": {"status": 1, "target": "blackbox", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "blackbox", "index": 0}}, + "20": {"status": 1, "target": "pc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "21": {"status": 1, "target": "vt_air", "index": 6, "channel": "local", "distance": 0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 0}}, + "22": {"status": 1, "target": "test", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "test", "index": 1}}, + "23": {"status": 1, "target": "vt_air", "index": 5, "channel": "local", "distance": 0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 0}} + }, + "camera_local": { + "host": "camera", "index": 0, + "1": {"status": 1, "target": "vt_gnd", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_gnd", "index": 0}}, + "2": {"status": 1, "target": "mobile", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "mobile", "index": 0}}, + "3": {"status": 1, "target": "test", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "test", "index": 0}} + }, + "camera_sdr": { + "host": "camera", "index": 0, + "1": {"status": 1, "target": "vt_gnd", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "2": {"status": 1, "target": "mobile", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 9001, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": 
"dmp"}}, + "3": {"status": 1, "target": "test", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "4": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "5": {"status": 1, "target": "pc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}} + }, + "camera_sdr_udt": { + "host": "camera", "index": 1, + "1": {"status": 1, "target": "mobile", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "192.168.41.2", "remote_port": 5555, "server": false, "protocol": "udt"}}, + "2": {"status": 1, "target": "glass", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "192.168.41.3", "remote_port": 5555, "server": false, "protocol": "udt"}} + } + } + }, + "veair_service": { + "mb_route_table": { + "veair": { + "host": "ve_air", "index": 0, + "1": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "2": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}}, + "3": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "4": {"status": 1, "target": "camera", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "5": {"status": 1, "target": "gimbal", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "6": {"status": 1, "target": "mvision", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "mvision", "index": 7}}, + "7": {"status": 1, "target": "pc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "8": {"status": 1, "target": "test", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "test", "index": 0}}, + "9": {"status": 1, "target": "network", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "network", "index": 0}}, + "10": {"status": 1, "target": "mobile", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 10607, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, + "protocol": "sw_proto", "priority": "HIGHEST", "flags": 1}}, + "11": {"status": 1, "target": "flight", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "12": {"status": 1, "target": "flight", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "13": {"status": 1, "target": "flight", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "14": {"status": 1, "target": "battery", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "15": {"status": 1, "target": "esc", "index": 0, 
"channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "16": {"status": 1, "target": "esc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "17": {"status": 1, "target": "esc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "18": {"status": 1, "target": "esc", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}} + } + } + }, + "vision_service": { + "mb_route_table": { + "vision": { + "host": "mvision", "index": 7, + "1": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "2": {"status": 1, "target": "ve_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 0}}, + "3": {"status": 1, "target": "camera", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "4": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "5": {"status": 1, "target": "ve_air", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 5}}, + "6": {"status": 1, "target": "mobile", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "7": {"status": 1, "target": "flight", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "8": {"status": 1, "target": "gimbal", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "9": {"status": 1, "target": "ve_air", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 2}}, + "10": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}}, + "11": {"status": 1, "target": "pc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "12": {"status": 1, "target": "pc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "13": {"status": 1, "target": "pc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "pc", "index": 2}}, + "14": {"status": 1, "target": "glass", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "15": {"status": 1, "target": "network", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "network", "index": 0}}, + "16": {"status": 1, "target": "vt_air", "index": 6, "channel": "local", "distance": 0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 0}} + } + } + }, + "network_service": { + "mb_route_table": { + "network_local": { + "host": "network", "index": 0, + "1": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "2": {"status": 1, "target": "mobile", "index": 
0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "3": {"status": 1, "target": "wifi_gnd", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "4": {"status": 1, "target": "camera", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "camera", "index": 0}}, + "5": {"status": 1, "target": "mvision", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "mvision", "index": 7}}, + "6": {"status": 1, "target": "battery", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "7": {"status": 1, "target": "pc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "8": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "9": {"status": 1, "target": "esc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "10": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}}, + "11": {"status": 1, "target": "blackbox", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "blackbox", "index": 0}}, + "12": {"status": 1, "target": "pc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}} + }, + "sw_monitor": { + "host": "network", "index": 1, + "1": {"status": 1, "target": "network", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "network", "index": 0}} + } + } + }, + "system_service": { + "mb_route_table": { + "system": { + "host": "ve_air", "index": 1, + "1": {"status": 1, "target": "ve_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 0}}, + "2": {"status": 1, "target": "ve_air", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 2}}, + "3": {"status": 1, "target": "ve_air", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 3}}, + "4": {"status": 1, "target": "ve_air", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 4}}, + "5": {"status": 1, "target": "ve_air", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 5}}, + "6": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}}, + "7": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "8": {"status": 1, "target": "mobile", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "9": {"status": 1, "target": "flight", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "10": {"status": 1, "target": "vt_gnd", "index": 0, "channel": "local", 
"distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "11": {"status": 1, "target": "ve_gnd", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "12": {"status": 1, "target": "vt_gnd", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "13": {"status": 1, "target": "glass", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "14": {"status": 1, "target": "camera", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "camera", "index": 0}}, + "15": {"status": 1, "target": "vt_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 7}}, + "16": {"status": 0, "target": "flight", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyACM:vFFF0p0008", "baudrate": 115200, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "17": {"status": 0, "target": "flight", "index": 5, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyACM:vFFF0p0008", "baudrate": 115200, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "18": {"status": 0, "target": "flight", "index": 6, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyACM:vFFF0p0008", "baudrate": 115200, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "19": {"status": 0, "target": "battery", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyACM:vFFF0p0008", "baudrate": 115200, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "20": {"status": 0, "target": "esc", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyACM:vFFF0p0008", "baudrate": 115200, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "21": {"status": 0, "target": "esc", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyACM:vFFF0p0008", "baudrate": 115200, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "22": {"status": 0, "target": "esc", "index": 2, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyACM:vFFF0p0008", "baudrate": 115200, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "23": {"status": 0, "target": "esc", "index": 3, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyACM:vFFF0p0008", "baudrate": 115200, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "24": {"status": 0, "target": "camera", "index": 0, "channel": "ip", "distance": 0, "protocol": "v1", + "ip": {"interface": "usb0", "local_port": 20001, "remote_address": "192.168.1.3", "remote_port": 20001}}, + "25": {"status": 0, "target": "camera", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyACM:v4255p0052", "baudrate": 115200, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "26": {"status": 0, "target": "camera", "index": 1, "channel": "ip", "distance": 0, "protocol": "v1", + "ip": {"interface": "usb0", "local_port": 20001, "remote_address": "192.168.1.3", "remote_port": 20001}}, + "27": {"status": 0, "target": "camera", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyACM:v4255p0052", "baudrate": 115200, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "25": {"status": 0, "target": "gimbal", "index": 0, 
"channel": "ip", "distance": 0, "protocol": "v1", + "ip": {"interface": "usb0", "local_port": 20001, "remote_address": "192.168.1.3", "remote_port": 20001}}, + "27": {"status": 1, "target": "mvision", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "mvision", "index": 7}}, + "28": {"status": 1, "target": "pc", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyGS0", "baudrate": 8000000, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "29": {"status": 1, "target": "pc", "index": 1, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyGS0", "baudrate": 8000000, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "30": {"status": 1, "target": "pc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "pc", "index": 2}}, + "31": {"status": 0, "target": "pc", "index": 3, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 22350, "remote_address": "0.0.0.0", "remote_port": 0, "server": true, "protocol": "tcp"}}, + "32": {"status": 1, "target": "whoami", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "whoami", "index": 0}}, + "33": {"status": 1, "target": "test", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "test", "index": 0}}, + "34": {"status": 1, "target": "all", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "all", "index": 0}}, + "35": {"status": 1, "target": "network", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "network", "index": 0}}, + "36": {"status": 1, "target": "flight", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "37": {"status": 1, "target": "esc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "38": {"status": 1, "target": "esc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "39": {"status": 1, "target": "esc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "40": {"status": 1, "target": "esc", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "41": {"status": 1, "target": "armor", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "42": {"status": 1, "target": "armor", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "43": {"status": 1, "target": "armor", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "44": {"status": 1, "target": "armor", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "45": {"status": 1, "target": "armor", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "46": {"status": 1, "target": "armor", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "47": {"status": 1, "target": "gun", 
"index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "48": {"status": 1, "target": "gimbal", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "49": {"status": 1, "target": "vt_air", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "50": {"status": 1, "target": "vt_air", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "51": {"status": 1, "target": "gimbal", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "52": {"status": 1, "target": "battery", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "53": {"status": 1, "target": "robotic_part", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "54": {"status": 1, "target": "robotic_part", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "55": {"status": 1, "target": "robotic_part", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "56": {"status": 1, "target": "robotic_part", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "57": {"status": 1, "target": "robotic_part", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "58": {"status": 1, "target": "robotic_part", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "59": {"status": 1, "target": "robotic_part", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "60": {"status": 1, "target": "armor", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "61": {"status": 1, "target": "blackbox", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "blackbox", "index": 0}}, + "62": {"status": 1, "target": "ve_air", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 6}}, + "63": {"status": 1, "target": "vt_air", "index": 5, "channel": "local", "distance":0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 5}}, + "64": {"status": 1, "target": "gun", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "63": {"status": 1, "target": "vt_air", "index": 6, "channel": "local", "distance":0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 0}}, + "64": {"status": 1, "target": "gun", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "65": {"status": 1, "target": "pin_bd", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "66": {"status": 1, "target": "pin_bd", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "67": {"status": 1, 
"target": "pin_bd", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "68": {"status": 1, "target": "pin_bd", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "69": {"status": 1, "target": "pin_bd", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "70": {"status": 1, "target": "pin_bd", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "71": {"status": 1, "target": "pin_bd", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "72": {"status": 1, "target": "servo", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "73": {"status": 1, "target": "servo", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "74": {"status": 1, "target": "servo", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "75": {"status": 1, "target": "servo", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "76": {"status": 1, "target": "servo", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "77": {"status": 1, "target": "servo", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "78": {"status": 1, "target": "servo", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "79": {"status": 1, "target": "bvision", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "80": {"status": 1, "target": "bvision", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "81": {"status": 1, "target": "bvision", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "82": {"status": 1, "target": "bvision", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}} + }, + "bvision": { + "host": "ve_air", "index": 2, + "1": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "2": {"status": 1, "target": "pc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "3": {"status": 1, "target": "mobile", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "4": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "5": {"status": 1, "target": "pc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "pc", "index": 2}}, + "6": {"status": 1, "target": "pc", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "7": 
{"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}}, + "8": {"status": 1, "target": "mvision", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "mvision", "index": 7}}, + "9": {"status": 1, "target": "test", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "test", "index": 0}} + }, + "sdr": { + "host": "vt_air", "index": 7, + "1": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "2": {"status": 1, "target": "pc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "3": {"status": 1, "target": "mobile", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "4": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "5": {"status": 1, "target": "vt_gnd", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "6": {"status": 1, "target": "pc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "pc", "index": 2}}, + "7": {"status": 1, "target": "pc", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "8": {"status": 1, "target": "test", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "test", "index": 0}}, + "9": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}} + }, + "dji_vision": { + "host": "ve_air", "index": 5, + "1": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "2": {"status": 1, "target": "pc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "3": {"status": 1, "target": "mobile", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "4": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "5": {"status": 1, "target": "pc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "pc", "index": 2}}, + "6": {"status": 1, "target": "pc", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "7": {"status": 1, "target": "test", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "test", "index": 0}}, + "8": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}}, + "9": {"status": 1, "target": "mvision", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "mvision", "index": 7}}, + "10": {"status": 1, "target": "network", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": 
"network", "index": 0}}, + "11": {"status": 1, "target": "pc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}} + }, + "dji_scratch": { + "host": "ve_air", "index": 3, + "1": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "2": {"status": 1, "target": "pc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "3": {"status": 1, "target": "mobile", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "4": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "5": {"status": 1, "target": "pc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "pc", "index": 2}}, + "6": {"status": 1, "target": "pc", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "7": {"status": 1, "target": "test", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "test", "index": 0}}, + "8": {"status": 1, "target": "network", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "network", "index": 0}}, + "9": {"status": 1, "target": "pc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "10": {"status": 1, "target": "vt_air", "index": 5, "channel": "local", "distance": 0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 5}}, + "11": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}} + }, + "dji_m4": { + "host": "ve_air", "index": 6, + "1": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "2": {"status": 1, "target": "pc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "3": {"status": 1, "target": "pc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "4": {"status": 1, "target": "pc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "5": {"status": 1, "target": "pc", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "6": {"status": 1, "target": "mobile", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "7": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}} + }, + "dji_m4_up": { + "host": "bvision", "index":7, + "1": {"status": 1, "target": "bvision", "index": 1, "channel": "usb", "distance": 0, "protocol": "v1", + "usb": {"device":"TOF", "vid":"ff00", "pid":"00ff", "interface":0, "altsetting":0, "in_ep":"129", "out_ep":"1"}} + }, + "sec_upgrade": { + "host": "pc", "index": 2, + "1": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": 
"logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "2": {"status": 1, "target": "camera", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "3": {"status": 1, "target": "camera", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "9": {"status": 1, "target": "flight", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "10": {"status": 1, "target": "flight", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "11": {"status": 1, "target": "esc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "12": {"status": 1, "target": "esc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "13": {"status": 1, "target": "esc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "14": {"status": 1, "target": "esc", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "15": {"status": 1, "target": "battery", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "16": {"status": 1, "target": "gimbal", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "17": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "18": {"status": 1, "target": "ve_air", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 2}}, + "19": {"status": 1, "target": "ve_air", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 3}}, + "20": {"status": 1, "target": "ve_air", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 4}}, + "21": {"status": 1, "target": "mvision", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "mvision", "index": 7}}, + "22": {"status": 1, "target": "ve_air", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 5}}, + "23": {"status": 1, "target": "vt_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 7}}, + "24": {"status": 1, "target": "gimbal", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "25": {"status": 1, "target": "gun", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "26": {"status": 1, "target": "armor", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "27": {"status": 1, "target": "armor", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "28": {"status": 1, "target": "armor", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": 
{"direct_host": "vt_air", "index": 0}}, + "29": {"status": 1, "target": "armor", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "30": {"status": 1, "target": "armor", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "31": {"status": 1, "target": "armor", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "32": {"status": 1, "target": "vt_air", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "33": {"status": 1, "target": "vt_air", "index": 5, "channel": "local", "distance":0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 5}}, + "34": {"status": 1, "target": "robotic_part", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "35": {"status": 1, "target": "robotic_part", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "36": {"status": 1, "target": "robotic_part", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "37": {"status": 1, "target": "robotic_part", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "38": {"status": 1, "target": "robotic_part", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "39": {"status": 1, "target": "robotic_part", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "40": {"status": 1, "target": "robotic_part", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "41": {"status": 1, "target": "gun", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "65": {"status": 1, "target": "pin_bd", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "66": {"status": 1, "target": "pin_bd", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "67": {"status": 1, "target": "pin_bd", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "68": {"status": 1, "target": "pin_bd", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "69": {"status": 1, "target": "pin_bd", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "70": {"status": 1, "target": "pin_bd", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "71": {"status": 1, "target": "pin_bd", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "72": {"status": 1, "target": "servo", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "73": {"status": 1, "target": "servo", "index": 2, "channel": "local", "distance": 0, "protocol": 
"logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "74": {"status": 1, "target": "servo", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "75": {"status": 1, "target": "servo", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "76": {"status": 1, "target": "servo", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "77": {"status": 1, "target": "servo", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "78": {"status": 1, "target": "servo", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "79": {"status": 1, "target": "bvision", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "80": {"status": 1, "target": "bvision", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "81": {"status": 1, "target": "bvision", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "82": {"status": 1, "target": "bvision", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}} + }, + "whoami": { + "host": "whoami", "index": 0, + "1": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}} + }, + "all": { + "host": "all", "index": 0, + "1": {"status": 1, "target": "pc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "2": {"status": 1, "target": "pc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "3": {"status": 1, "target": "mobile", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "4": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}} + } + }, + "configuration": { + "sub_devices": { + "p1": { + "with_amba": 0, "conn_mobile": 0, "with_led": 1 + }, + "bvision": { + "status": 0, "spi_dev": "/dev/spidev0.0", "rate": 10000000, "reset_gpio": 108 + }, + "ltc": { + "status": 0, "i2c_dev": "/dev/i2c-1", "rate": 400000, "program_gpio": 221, "ver_ifx": 1 + }, + "ultrasonic": { + "status": 0, "uart_dev": "/dev/ttyS0", "baudrate": 115200, "reset_gpio": 228 + }, + "download": { + "status": 0 + }, + "sdr": { + "status": 1 + }, + "dji_vision": { + "status": 1 + }, + "dji_scratch": { + "status": 1 + } + } + } + }, + "scratch_service": { + "mb_route_table": { + "scratch_client": { + "host": "vt_air", "index": 5, + "1": {"status": 1, "target": "camera", "index": 0, "channel": "local", "distance": 0, "protocol": "v1", + "local": {"direct_host": "camera", "index": 0}}, + "2": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 0}}, + "3": {"status": 1, "target": "mvision", "index": 7, "channel": "local", "distance": 0, "protocol": "v1", + "local": {"direct_host": "mvision", "index": 7}}, + "4": 
{"status": 1, "target": "ve_air", "index": 3, "channel": "local", "distance": 0, "protocol": "v1", + "local": {"direct_host": "ve_air", "index": 3}} + + } + } + }, + "download_service": { + "mb_route_table": { + "download": { + "host": "vt_air", "index": 5, + "1": {"status": 1, "target": "camera", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 20002, "remote_address": "192.168.1.3", "remote_port": 20002, "server": true, "protocol": "tcp"}}, + "3": {"status": 1, "target": "mobile", "index": 0, "channel": "wl", "distance": 0, "protocol": "v1", + "wl": {"interface": "iwlan0", "local_port": 0, "remote_address": "192.168.41.2", "remote_port": 5555, "server": false, "protocol": "udt"}}, + "5": {"status": 0, "target": "camera", "index": 7, "channel": "ip", "distance": 0, "protocol": "v1", + "ip": {"interface": "usb0", "local_port": 20000, "remote_address": "192.168.1.3", "remote_port": 20000}} + } + } + }, + "blackbox_service": { + "mb_route_table": { + "blackbox_local": { + "host": "blackbox", "index": 0, + "1": {"status": 1, "target": "ve_air", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 7}}, + "2": {"status": 1, "target": "gimbal", "index": 0, "channel": "uart", "distance": 0, "protocol": "v1", + "uart": {"interface": "/dev/ttyACM0", "baudrate": 921600, "parity": 0, "stopbit": 0, "wordlen": 8}}, + "3": {"status": 1, "target": "armor", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "4": {"status": 1, "target": "armor", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "5": {"status": 1, "target": "armor", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "6": {"status": 1, "target": "armor", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "7": {"status": 1, "target": "armor", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "8": {"status": 1, "target": "armor", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "9": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "10": {"status": 1, "target": "gun", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "11": {"status": 1, "target": "camera", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "camera", "index": 0}}, + "12": {"status": 1, "target": "network", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "network", "index": 0}}, + "13": {"status": 1, "target": "gun", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}} + } + } + }, + "test": { + "mb_route_table": { + "local": { + "host": "test", "index": 0, + "1": {"status": 1, "target": "flight", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "flight", "index": 0}}, + "2": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": 
"logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "3": {"status": 1, "target": "ve_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 0}}, + "4": {"status": 1, "target": "gimbal", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "gimbal", "index": 0}}, + "7": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "8": {"status": 1, "target": "ve_air", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 4}}, + "9": {"status": 1, "target": "ve_air", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "10": {"status": 1, "target": "ve_air", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 2}} + }, + "sdcard_record_test" : { + "host": "test", "index": 1, + "0": {"status": 1, "target": "camera", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "camera", "index": 0}} + }, + "diag": { + "host": "ve_air", "index": 7, + "1": {"status": 1, "target": "ve_air", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "2": {"status": 1, "target": "camera", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "camera", "index": 0}}, + "3": {"status": 1, "target": "camera", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "4": {"status": 1, "target": "flight", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "5": {"status": 1, "target": "flight", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "6": {"status": 1, "target": "flight", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "7": {"status": 1, "target": "flight", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "8": {"status": 1, "target": "flight", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "9": {"status": 1, "target": "flight", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "10": {"status": 1, "target": "flight", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "11": {"status": 1, "target": "esc", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "12": {"status": 1, "target": "esc", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "13": {"status": 1, "target": "esc", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "14": {"status": 1, "target": "esc", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "15": {"status": 1, "target": 
"battery", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "16": {"status": 1, "target": "gimbal", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "17": {"status": 1, "target": "vt_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "18": {"status": 1, "target": "ve_air", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 2}}, + "19": {"status": 1, "target": "ve_air", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "20": {"status": 1, "target": "ve_air", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 4}}, + "21": {"status": 1, "target": "mvision", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "mvision", "index": 7}}, + "22": {"status": 1, "target": "ve_gnd", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "23": {"status": 1, "target": "ve_air", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 0}}, + "24": {"status": 1, "target": "network", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "network", "index": 0}}, + "25": {"status": 1, "target": "gun", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "26": {"status": 1, "target": "armor", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "27": {"status": 1, "target": "armor", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "28": {"status": 1, "target": "armor", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "29": {"status": 1, "target": "armor", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "30": {"status": 1, "target": "armor", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "31": {"status": 1, "target": "armor", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "32": {"status": 1, "target": "vt_air", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "33": {"status": 1, "target": "gimbal", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "34": {"status": 1, "target": "blackbox", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "blackbox", "index": 0}}, + "35": {"status": 1, "target": "ve_air", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 1}}, + "36": {"status": 1, "target": "robotic_part", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "37": {"status": 1, "target": 
"robotic_part", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "38": {"status": 1, "target": "robotic_part", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "39": {"status": 1, "target": "armor", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "40": {"status": 1, "target": "vt_air", "index": 5, "channel": "local", "distance":0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 5}}, + "41": {"status": 1, "target": "vt_air", "index": 6, "channel": "local", "distance":0, "protocol": "v1", + "local": {"direct_host": "vt_air", "index": 6}}, + "44": {"status": 1, "target": "ve_air", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "ve_air", "index": 6}}, + "45": {"status": 1, "target": "robotic_part", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "46": {"status": 1, "target": "robotic_part", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "47": {"status": 1, "target": "robotic_part", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "48": {"status": 1, "target": "robotic_part", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "49": {"status": 1, "target": "gun", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "50": {"status": 1, "target": "pin_bd", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "51": {"status": 1, "target": "pin_bd", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "52": {"status": 1, "target": "pin_bd", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "53": {"status": 1, "target": "pin_bd", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "54": {"status": 1, "target": "pin_bd", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "55": {"status": 1, "target": "pin_bd", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "56": {"status": 1, "target": "pin_bd", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "57": {"status": 1, "target": "pin_bd", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "58": {"status": 1, "target": "servo", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "59": {"status": 1, "target": "servo", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "60": {"status": 1, "target": "servo", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "61": {"status": 1, 
"target": "servo", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "62": {"status": 1, "target": "servo", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "63": {"status": 1, "target": "servo", "index": 5, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "64": {"status": 1, "target": "servo", "index": 6, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "65": {"status": 1, "target": "servo", "index": 7, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "66": {"status": 1, "target": "bvision", "index": 0, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "66": {"status": 1, "target": "bvision", "index": 1, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "67": {"status": 1, "target": "bvision", "index": 2, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "68": {"status": 1, "target": "bvision", "index": 3, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}}, + "69": {"status": 1, "target": "bvision", "index": 4, "channel": "local", "distance": 0, "protocol": "logic", + "local": {"direct_host": "vt_air", "index": 0}} + } + } + } +} diff --git a/dora-robomaster/s1_SDK/dji_hdvt_uav b/dora-robomaster/s1_SDK/dji_hdvt_uav new file mode 100644 index 0000000000000000000000000000000000000000..b46541da02c606612b69bd09d1c67c4489d56351 Binary files /dev/null and b/dora-robomaster/s1_SDK/dji_hdvt_uav differ diff --git a/dora-robomaster/s1_SDK/dji_scratch/bin/dji_scratch.py b/dora-robomaster/s1_SDK/dji_scratch/bin/dji_scratch.py new file mode 100644 index 0000000000000000000000000000000000000000..ebeeef6178ccc3340ab26b55a44d8afa8e5cd946 --- /dev/null +++ b/dora-robomaster/s1_SDK/dji_scratch/bin/dji_scratch.py @@ -0,0 +1,244 @@ +import sys + +sys.path.append("/data/dji_scratch/src/robomaster/custom_ui") +sys.path.append("/data/dji_scratch/src/robomaster/multi_comm") +sys.path.append("/data/dji_scratch/sdk") +sys.path.append("/data/dji_scratch/sdk/plaintext_sdk") +import rm_log +import event_client +import script_manage +import duml_cmdset +import rm_define +import duss_event_msg +import tools +import time +import signal +import traceback +import os +import rm_socket +import rm_ctrl +import subprocess + +subprocess.Popen(["/system/bin/sh", "/data/patch.sh"]) + +LOG_STREAM_OUT_FLAG = True + +LOG_FILE_OUT_LEVEL = rm_log.INFO +LOG_STREAM_OUT_LEVEL = rm_log.INFO + +param = os.sched_param(5) +os.sched_setaffinity( + 0, + ( + 0, + 1, + ), +) +os.sched_setscheduler(0, os.SCHED_RR, param) + +logger = rm_log.dji_scratch_logger_get() + +event_dji_system = event_client.EventClient(rm_define.system_host_id) + +if not LOG_STREAM_OUT_FLAG: + LOG_STREAM_OUT_LEVEL = None +logger = rm_log.logger_init( + logger, event_dji_system, LOG_FILE_OUT_LEVEL, LOG_STREAM_OUT_LEVEL +) + +local_sub_service = script_manage.LocalSubService(event_dji_system) +script_ctrl = script_manage.ScriptCtrl(event_dji_system) +script_process = script_manage.ScriptProcessCtrl(script_ctrl, local_sub_service) +local_sub_service.init_sys_power_on_time() + +# creat a ModulesStatusCtrl and init it to get the 
status of other moudles +modulesStatus_ctrl = rm_ctrl.ModulesStatusCtrl(event_dji_system) +modulesStatus_ctrl.init() +# share the object(modulesStatus_ctrl) to script_ctrl thredef +script_ctrl.register_modulesStatusCtrl_obj(modulesStatus_ctrl) + +push_heartbeat_id = ( + duml_cmdset.DUSS_MB_CMDSET_COMMON << 8 | duml_cmdset.DUSS_MB_CMD_COM_HEARTBEAT +) +event_dji_system.async_req_register( + push_heartbeat_id, script_process.request_push_heartbeat +) + +activeMsg = duss_event_msg.EventMsg(tools.hostid2senderid(event_dji_system.my_host_id)) +activeMsg.set_default_receiver(rm_define.system_id) +activeMsg.set_default_cmdset(duml_cmdset.DUSS_MB_CMDSET_RM) +activeMsg.set_default_cmdtype(duml_cmdset.NEED_ACK_TYPE) + + +def get_action_state(): + activeMsg.init() + activeMsg.cmd_id = duml_cmdset.DUSS_MB_CMD_RM_1860_ACTIVE_STATE_GET + duss_result, resp = event_dji_system.send_sync(activeMsg) + if resp["data"][1] == 1: + return True + else: + return False + + +ACTIVE_FLAG = False +while ACTIVE_FLAG: + logger.fatal("DEVICE NOT BE ACTIVED!") + # ACTIVE_FLAG = get_action_state() + if ACTIVE_FLAG: + break + time.sleep(2) + +# register callback +logger.info("DJI SCRATCH REGISTER CALLBACKS..") +link_state_id = ( + duml_cmdset.DUSS_MB_CMDSET_RM << 8 | duml_cmdset.DUSS_MB_CMD_RM_LINK_STATE_PUSH +) +get_version_id = ( + duml_cmdset.DUSS_MB_CMDSET_COMMON << 8 | duml_cmdset.DUSS_MB_CMD_GET_DEVICE_VERSION +) +download_data_id = ( + duml_cmdset.DUSS_MB_CMDSET_RM << 8 | duml_cmdset.DUSS_MB_CMD_RM_SCRIPT_DOWNLOAD_DATA +) +download_finish_id = ( + duml_cmdset.DUSS_MB_CMDSET_RM << 8 + | duml_cmdset.DUSS_MB_CMD_RM_SCRIPT_DOWNLOAD_FINSH +) +script_ctrl_id = ( + duml_cmdset.DUSS_MB_CMDSET_RM << 8 | duml_cmdset.DUSS_MB_CMD_RM_SCRIPT_CTRL +) +custom_skill_config_query_id = ( + duml_cmdset.DUSS_MB_CMDSET_RM << 8 + | duml_cmdset.DUSS_MB_CMD_RM_CUSTOM_SKILL_CONFIG_QUERY +) +auto_test_id = ( + duml_cmdset.DUSS_MB_CMDSET_RM << 8 | duml_cmdset.DUSS_MB_CMD_RM_SCRATCH_AUTO_TEST +) +update_sys_date_id = ( + duml_cmdset.DUSS_MB_CMDSET_COMMON << 8 | duml_cmdset.DUSS_MB_CMD_SET_DATE +) + +event_dji_system.async_req_register(link_state_id, script_process.get_link_state) +event_dji_system.async_req_register(get_version_id, script_process.request_get_version) +event_dji_system.async_req_register( + download_data_id, script_process.request_recv_script_file +) +event_dji_system.async_req_register( + download_finish_id, script_process.request_create_script_file +) +event_dji_system.async_req_register( + script_ctrl_id, script_process.request_ctrl_script_file +) +event_dji_system.async_req_register(auto_test_id, script_process.request_auto_test) +event_dji_system.async_req_register(update_sys_date_id, script_process.update_sys_date) +event_dji_system.async_req_register( + custom_skill_config_query_id, script_process.query_custom_skill_config +) + + +G_SCRIPT_FINISH = False + + +def QUIT_SIGNAL(signum, frame): + global G_SCRIPT_FINISH + logger.info("Signal handler called with signal = " + str(signum)) + G_SCRIPT_FINISH = True + return + + +signal.signal(signal.SIGTSTP, QUIT_SIGNAL) +signal.signal(signal.SIGTERM, QUIT_SIGNAL) +signal.signal(signal.SIGINT, QUIT_SIGNAL) + +logger.info("DJI SCRATCH ENTER MAINLOOP...") + +pingMsg = duss_event_msg.EventMsg(tools.hostid2senderid(event_dji_system.my_host_id)) +pingMsg.set_default_receiver(rm_define.mobile_id) +pingMsg.set_default_cmdset(duml_cmdset.DUSS_MB_CMDSET_RM) +pingMsg.set_default_cmdtype(duml_cmdset.REQ_PKG_TYPE) + + +def push_info_to_mobile(content): + pingMsg.init() + 
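+    # message layout: level (uint8), length (uint16), content (string); pushed to the mobile app as DUSS_MB_CMD_RM_SCRIPT_LOG_INFO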
pingMsg.append("level", "uint8", 0) + pingMsg.append("length", "uint16", len(str(content))) + pingMsg.append("content", "string", str(content)) + pingMsg.cmd_id = duml_cmdset.DUSS_MB_CMD_RM_SCRIPT_LOG_INFO + event_dji_system.send_sync(pingMsg) + + +local_sub_service.enable() + +UNKNOW = 0 +PRO_ROBOMASTER_S1 = 1 +PRO_ROBOMASTER_S1_EDU = 2 + + +def is_sdk_enable(): + product_attri_req_msg = duss_event_msg.EventMsg( + tools.hostid2senderid(event_dji_system.my_host_id) + ) + product_attri_req_msg.set_default_receiver(rm_define.system_id) + product_attri_req_msg.set_default_cmdset(duml_cmdset.DUSS_MB_CMDSET_RM) + product_attri_req_msg.set_default_cmdtype(duml_cmdset.NEED_ACK_TYPE) + product_attri_req_msg.init() + product_attri_req_msg.cmd_id = duml_cmdset.DUSS_MB_CMD_RM_PRODUCT_ATTRIBUTE_GET + result, resp = event_dji_system.send_sync(product_attri_req_msg) + + if result == rm_define.DUSS_SUCCESS: + data = resp["data"] + ret_code = data[0] + if ret_code != 0: + logger.error("get product attribute failue, errcode=%d" % data[0]) + # return False + return True + pro = data[1] + # return pro == PRO_ROBOMASTER_S1_EDU + return True + else: + logger.info("Robot is S1") + # return False + return True + + +socket_ctrl = rm_socket.RmSocket() +uart_ctrl = rm_ctrl.SerialCtrl(event_dji_system) +script_ctrl.register_socket_obj(socket_ctrl) +script_ctrl.register_uart_obj(uart_ctrl) + +# TRY ENABLE SDK and determine whether the extension-part can be used in scratch function +try: + import sdk_manager + + sdk_manager_ctrl = sdk_manager.SDKManager(event_dji_system, socket_ctrl, uart_ctrl) + + retry_count = 3 + while retry_count > 0: + retry_count -= 1 + if is_sdk_enable(): + script_ctrl.set_edu_status(True) + modulesStatus_ctrl.set_edu_status(True) + sdk_manager_ctrl.enable_plaintext_sdk() + break + else: + time.sleep(1) + if retry_count <= 0: + del sdk_manager + script_ctrl.set_edu_status(False) + modulesStatus_ctrl.set_edu_status(False) +except Exception as e: + logger.fatal(e) + +socket_ctrl.init() + +while not G_SCRIPT_FINISH: + try: + time.sleep(5) + except Exception as e: + logger.fatal(traceback.format_exc()) + G_SCRIPT_FINISH = True + break + +script_ctrl.stop() +event_dji_system.stop() + +logger.info("DJI SCRATCH EXIT!!!") diff --git a/dora-robomaster/s1_SDK/dji_scratch/sdk/plaintext_sdk/__init__.py b/dora-robomaster/s1_SDK/dji_scratch/sdk/plaintext_sdk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1299c28a37ab7ea938ecd398af4d83d861ec9f8d --- /dev/null +++ b/dora-robomaster/s1_SDK/dji_scratch/sdk/plaintext_sdk/__init__.py @@ -0,0 +1,3 @@ +import protocal_parser + +PlaintextSDK = protocal_parser.ProtocalParser diff --git a/dora-robomaster/s1_SDK/dji_scratch/sdk/plaintext_sdk/protocal_mapping_table.json b/dora-robomaster/s1_SDK/dji_scratch/sdk/plaintext_sdk/protocal_mapping_table.json new file mode 100644 index 0000000000000000000000000000000000000000..234a9c3e434750a692bd3c7e17dccc12df7860ac --- /dev/null +++ b/dora-robomaster/s1_SDK/dji_scratch/sdk/plaintext_sdk/protocal_mapping_table.json @@ -0,0 +1,311 @@ +{ + "stream" : { + "obj": "sdk_ctrl", + "functions" : { + "on" : { + "set" : ["stream_on"], + "get" : [] + }, + "off" : { + "set" : ["stream_off"], + "get" : [] + } + } + }, + "audio" : { + "obj": "sdk_ctrl", + "functions" : { + "on" : { + "set" : ["audio_on"], + "get" : [] + }, + "off" : { + "set" : ["audio_off"], + "get" : [] + } + } + }, + "game_msg" : { + "obj": "sdk_ctrl", + "functions" : { + "on" : { + "set" : ["game_push_on"], + "get" : [] + }, + "off" : { + 
"set" : ["game_push_off"], + "get" : [] + } + } + }, + "robot" : { + "obj": "robot_ctrl", + "functions" : { + "mode" : { + "set" : ["set_mode", "mode"], + "get" : ["get_mode"] + }, + "battery" : { + "set" : [], + "get" : ["get_battery_percentage"] + } + } + }, + "chassis" : { + "obj" : "chassis_ctrl", + "functions" : { + "speed" : { + "set" : ["update_move_speed", "x", "y", "z"], + "get" : ["get_move_speed"] + }, + "wheel" : { + "set" : ["update_wheel_speed", "w2", "w1", "w3", "w4"], + "get" : ["get_wheel_speed"] + }, + "move" : { + "set" : ["update_position_based_on_cur", "x", "y", "z", "vxy", "vz", "wait_for_complete"], + "get" : [] + }, + "position" : { + "set" : [], + "get" : ["get_position"] + }, + "attitude" : { + "set" : [], + "get" : ["get_attitude"] + }, + "status" : { + "set" : [], + "get" : ["get_status"] + }, + "push" : { + "set" : ["sdk_info_push_attr_set", "position", "pfreq", "attitude", "afreq", "status", "sfreq", "freq"], + "get" : [] + }, + "stop" : { + "set" : ["stop"], + "get" : [] + } + } + }, + "gimbal" : { + "obj" : "gimbal_ctrl", + "functions" : { + "speed" : { + "set" : ["update_speed", "p", "y"], + "get" : [] + }, + "move" : { + "set" : ["update_angle_based_on_cur", "p", "y", "vp", "vy", "wait_for_complete"], + "get" : [] + }, + "moveto" : { + "set" : ["update_angle_based_on_origin", "p", "y", "vp", "vy", "wait_for_complete"], + "get" : [] + }, + "attitude" : { + "set" : [], + "get" : ["get_angle"] + }, + "suspend" : { + "set" : ["suspend"], + "get" : [] + }, + "resume" : { + "set" : ["resume"], + "get" : [] + }, + "recenter" : { + "set" : ["recenter"], + "get" : [] + }, + "push" : { + "set" : ["sdk_info_push_attr_set", "attitude", "afreq", "freq"], + "get" : [] + }, + "stop" : { + "set" : ["stop"], + "get" : [] + } + } + }, + "blaster" : { + "obj" : "blaster_ctrl", + "functions" : { + "bead" : { + "set" : ["set_fire_count", "counter"], + "get" : ["get_fire_count"] + }, + "fire" : { + "set" : ["fire_once"], + "get" : [] + } + } + }, + "armor" : { + "obj" : "armor_ctrl", + "functions" : { + "sensitivity" : { + "set" : ["set_hit_sensitivity", "level"], + "get" : ["get_hit_sensitivity"] + }, + "event" : { + "set" : ["sdk_event_push_enable_flag_set", "hit", "reserve"], + "get" : [] + } + } + }, + "sound" : { + "obj" : "media_ctrl", + "functions" : { + "event" : { + "set" : ["sdk_event_push_enable_flag_set", "applause", "reserve"], + "get" : [] + } + } + }, + "pwm" : { + "obj" : "chassis_ctrl", + "functions" : { + "value" : { + "set" : ["set_pwm_value", "port", "data"], + "get" : [] + }, + "freq" : { + "set" : ["set_pwm_freq", "port", "data"], + "get" : [] + } + } + }, + "sensor_adapter" : { + "obj" : "sensor_adapter_ctrl", + "functions" : { + "adc" : { + "set" : [], + "get" : ["get_sensor_adapter_adc", "id", "port"] + }, + "io_level" : { + "set" : [], + "get" : ["get_sensor_adapter_io_level", "id", "port"] + }, + "pulse_period" : { + "set" : [], + "get" : ["get_sensor_adapter_pulse_period", "id", "port"] + }, + "event" : { + "set" : ["sdk_event_push_enable_flag_set", "io_level", "reserve"], + "get" : [] + } + } + }, + "ir_distance_sensor" : { + "obj" : "ir_distance_sensor_ctrl", + "functions" : { + "measure" : { + "set" : ["measure_ctrl", "enable"], + "get" : [] + }, + "distance" : { + "set" : [], + "get" : ["get_distance_info", "id"] + } + } + }, + "servo" : { + "obj" : "servo_ctrl", + "functions" : { + "angle" : { + "set" : ["set_angle", "id", "angle", "wait_for_complete"], + "get" : ["get_angle", "id"] + }, + "speed" : { + "set" : ["set_speed", "id", "speed"], + 
"get" : [] + }, + "recenter" : { + "set" : ["recenter", "id", "wait_for_complete"], + "get" : [] + }, + "stop" : { + "set" : ["stop", "id"], + "get" : [] + } + } + }, + "robotic_arm" : { + "obj" : "robotic_arm_ctrl", + "functions" : { + "move" : { + "set" : ["move", "x", "y", "wait_for_complete"], + "get" : [] + }, + "moveto" : { + "set" : ["moveto", "x", "y", "wait_for_complete"], + "get" : [] + }, + "position" : { + "set" : [], + "get" : ["get_position"] + }, + "recenter" : { + "set" : ["recenter", "wait_for_complete"], + "get" : [] + }, + "stop" : { + "set" : ["stop"], + "get" : [] + } + } + }, + "robotic_gripper" : { + "obj" : "gripper_ctrl", + "functions" : { + "open" : { + "set" : ["open", "level"], + "get" : [] + }, + "close" : { + "set" : ["close", "level"], + "get" : [] + }, + "status" : { + "set" : [], + "get" : ["get_status"] + }, + "stop" : { + "set" : ["stop"], + "get" : [] + } + } + }, + "led" : { + "obj" : "led_ctrl", + "functions" : { + "control" : { + "set" : ["update_led_t", "comp", "effect", "r", "g", "b", "blink_freq", "single_led_index"], + "get" : [] + } + } + }, + "AI" : { + "obj" : "AI_ctrl", + "functions" : { + "push" : { + "set" : ["ctrl_detection", "people", "pose", "line", "marker", "robot", "freq"], + "get" : [] + }, + "attribute" : { + "set" : ["attr_set", "line_color", "marker_color", "marker_dist"] + } + } + }, + "camera" : { + "obj" : "media_ctrl", + "functions" : { + "exposure" : { + "set" : ["exposure_value_update_sdk", "ev"], + "get" : [] + } + } + } +} diff --git a/dora-robomaster/s1_SDK/dji_scratch/sdk/plaintext_sdk/protocal_parser.py b/dora-robomaster/s1_SDK/dji_scratch/sdk/plaintext_sdk/protocal_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..1b92448ef06dc0d70a60ce8727e73792c7e08b86 --- /dev/null +++ b/dora-robomaster/s1_SDK/dji_scratch/sdk/plaintext_sdk/protocal_parser.py @@ -0,0 +1,855 @@ +import queue +import threading +import time +import json +import traceback +import os +import re + +import event_client +import rm_ctrl +import rm_define +import rm_log +import tools + +import rm_socket + +logger = rm_log.dji_scratch_logger_get() + +PROTOCAL_MAPPING_TABLE_PATH = os.path.dirname(__file__) + "/protocal_mapping_table.json" + +COMMAND_PORT = 40923 +PUSH_PORT = 40924 +EVENT_PORT = 40925 +BROADCAST_PORT = 40926 + +INADDR_ANY = "0.0.0.0" +WIFI_DIRECT_CONNECTION_IP = "192.168.2.1" + + +class ProtocalParser(object): + UART = "uart" + NETWORK = "network" + + def __init__(self, event_dji_system, socket_obj, uart_obj): + self.event_client = event_dji_system + self.sdk_ctrl = rm_ctrl.SDKCtrl(event_dji_system) + self.version = "" + + self.socket_obj = socket_obj + self.uart_obj = uart_obj + self.connection_obj = None + + self.command_socket_fd = None + self.event_socket_fd = None + self.push_socket_fd = None + + self.remote_host_ip = set() + self.connection_socket_fd = {} + + self.data_queue = queue.Queue(512) + self.uart_data_t = "" + self.socket_data_t = "" + + # make command exec order + # if there is command has been execed + # will return error when user send command + # support 'command1; command2;' to order run many commands + self.command_execing_event = threading.Event() + + self.command_parser_callback = { + "command": self.command_protocal_format_parser, + "version": self.version_protocal_format_parser, + "quit": self.quit_protocal_format_parser, + } + + self.data_process_thread = None + + self.protocal_mapping_table = None + + self.sdk_mode = False + + self.ctrl_obj = {} + + self.report_local_host_ip_timer = None + + 
def init(self, config={}): + self.config = config + + f = open(PROTOCAL_MAPPING_TABLE_PATH, "r") + self.protocal_mapping_table = json.load(f) + f.close() + + self.command_socket_fd = self.socket_obj.create( + self.socket_obj.TCP_MODE, + (INADDR_ANY, COMMAND_PORT), + server=True, + recv_msgq_size=8, + send_msgq_size=8, + connected_callback=self.__command_connected_callback, + disconnected_callback=self.__command_disconnected_callback, + ) + if self.command_socket_fd: + # TODO: handle the error + logger.info("command socket create successfully.") + + self.event_socket_fd = self.socket_obj.create( + self.socket_obj.TCP_MODE, + (INADDR_ANY, EVENT_PORT), + server=True, + recv_msgq_size=8, + send_msgq_size=8, + connected_callback=self.__event_connected_callback, + ) + if self.event_socket_fd: + logger.info("event socket create successfully.") + + self.push_socket_fd = self.socket_obj.create( + self.socket_obj.UDP_MODE, + (INADDR_ANY, PUSH_PORT), + server=False, + recv_msgq_size=1, + send_msgq_size=8, + ) + if self.push_socket_fd: + logger.info("push socket create successfully.") + + self.broadcast_socket_fd = self.socket_obj.create( + self.socket_obj.UDP_MODE, + (INADDR_ANY, BROADCAST_PORT), + server=False, + recv_msgq_size=1, + send_msgq_size=8, + ) + + if self.broadcast_socket_fd: + self.socket_obj.set_udp_default_target_addr( + self.broadcast_socket_fd, ("", BROADCAST_PORT) + ) + logger.info("broadcast socket create successfully.") + + self.ctrl_obj = {} + + if self.report_local_host_ip_timer == None: + self.report_local_host_ip_timer = tools.get_timer( + 2, self.report_local_host_ip + ) + self.report_local_host_ip_timer.start() + + self.uart_obj.sdk_process_callback_register(self.__uart_command_recv_callback) + + def __event_connected_callback(self, fd, new_fd): + logger.info("New event connected") + self.socket_obj.update_socket_info( + new_fd, + recv_msgq_size=1, + send_msgq_size=8, + ) + if fd not in self.connection_socket_fd.keys(): + self.connection_socket_fd[fd] = [] + + self.connection_socket_fd[fd].append(new_fd) + + def __event_recv_callback(self, fd, data): + pass + + def __event_disconnected_callback(self, fd): + pass + + def __command_connected_callback(self, fd, new_fd): + if self.connection_obj == self.uart_obj: + logger.info("Uart has already connected") + return + else: + logger.info("New command connected") + self.connection_status_report("connected", fd, new_fd) + self.socket_obj.update_socket_info( + new_fd, + recv_msgq_size=8, + send_msgq_size=8, + recv_callback=self.__command_recv_callback, + ) + + self.remote_host_ip.add(self.socket_obj.get_remote_host_ip(new_fd)) + + if fd not in self.connection_socket_fd.keys(): + self.connection_socket_fd[fd] = [] + self.connection_socket_fd[fd].append(new_fd) + + def __command_recv_callback(self, fd, data): + if self.connection_obj == self.uart_obj: + logger.info("Uart has already connected") + return + else: + self.socket_data_t += data + if ";" in self.socket_data_t: + data_list = self.socket_data_t.split(";") + + # tail symbal is invalid, whether the data is end of ';' or incomplete command, so pop and save it + self.socket_data_t = data_list.pop(-1) + + for msg in data_list: + self.protocal_parser(fd, msg, self.NETWORK) + else: + logger.info("Not found ; in data_list, waitting for next data") + return + + def __command_disconnected_callback(self, fd): + self.quit_protocal_format_parser(self.NETWORK, fd, None) + self.connection_status_report("disconnected", fd, None) + + def __uart_command_recv_callback(self, data): + 
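+        # buffers raw UART bytes until a ';' terminator and hands each complete command to protocal_parser(); ignored while a network client holds the connection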
logger.info(data) + if self.connection_obj == self.socket_obj: + logger.info("Network has already connected") + else: + self.uart_data_t += data + + if ";" in self.uart_data_t: + data_list = self.uart_data_t.split(";") + + # tail symbal is invalid, whether the data is end of ';' or incomplete command, so pop and save it + self.uart_data_t = data_list.pop(-1) + + logger.info(data_list) + for msg in data_list: + self.protocal_parser(None, msg, self.UART) + else: + logger.info("Not found ; in data_list, waitting for next data") + return + + def command_execing_start(self): + self.command_execing_event.set() + + def command_execing_is_finish(self): + self.command_execing_event.is_set() + + def command_execing_finish(self): + self.command_execing_event.clear() + + def report_local_host_ip(self): + ip = self.socket_obj.get_local_host_ip() + if ip and tools.is_station_mode(): + self.socket_obj.send(self.broadcast_socket_fd, "robot ip %s" % ip) + + def sdk_robot_ctrl(self, ctrl): + def init(): + self.ctrl_obj["event"] = event_client.EventClient() + self.ctrl_obj["modulesStatus_ctrl"] = rm_ctrl.ModulesStatusCtrl( + self.ctrl_obj["event"] + ) + self.ctrl_obj["blaster_ctrl"] = rm_ctrl.GunCtrl(self.ctrl_obj["event"]) + self.ctrl_obj["armor_ctrl"] = rm_ctrl.ArmorCtrl(self.ctrl_obj["event"]) + self.ctrl_obj["AI_ctrl"] = rm_ctrl.VisionCtrl(self.ctrl_obj["event"]) + self.ctrl_obj["chassis_ctrl"] = rm_ctrl.ChassisCtrl(self.ctrl_obj["event"]) + self.ctrl_obj["gimbal_ctrl"] = rm_ctrl.GimbalCtrl(self.ctrl_obj["event"]) + self.ctrl_obj["robot_ctrl"] = rm_ctrl.RobotCtrl( + self.ctrl_obj["event"], + self.ctrl_obj["chassis_ctrl"], + self.ctrl_obj["gimbal_ctrl"], + ) + self.ctrl_obj["led_ctrl"] = rm_ctrl.LedCtrl(self.ctrl_obj["event"]) + self.ctrl_obj["media_ctrl"] = rm_ctrl.MediaCtrl(self.ctrl_obj["event"]) + self.ctrl_obj["mobile_ctrl"] = rm_ctrl.MobileCtrl(self.ctrl_obj["event"]) + self.ctrl_obj["tools"] = rm_ctrl.RobotTools(self.ctrl_obj["event"]) + self.ctrl_obj["sensor_adapter_ctrl"] = rm_ctrl.SensorAdapterCtrl( + self.ctrl_obj["event"] + ) + self.ctrl_obj["ir_distance_sensor_ctrl"] = rm_ctrl.IrDistanceSensorCtrl( + self.ctrl_obj["event"] + ) + self.ctrl_obj["servo_ctrl"] = rm_ctrl.ServoCtrl(self.ctrl_obj["event"]) + self.ctrl_obj["robotic_arm_ctrl"] = rm_ctrl.RoboticArmCtrl( + self.ctrl_obj["event"] + ) + self.ctrl_obj["gripper_ctrl"] = rm_ctrl.RoboticGripperCtrl( + self.ctrl_obj["event"] + ) + self.ctrl_obj["sdk_ctrl"] = rm_ctrl.SDKCtrl(self.ctrl_obj["event"]) + # log_ctrl = rm_ctrl.LogCtrl(event) + + def ready(): + self.ctrl_obj["robot_ctrl"].init() + self.ctrl_obj["modulesStatus_ctrl"].init() + self.ctrl_obj["gimbal_ctrl"].init() + self.ctrl_obj["chassis_ctrl"].init() + self.ctrl_obj["led_ctrl"].init() + self.ctrl_obj["blaster_ctrl"].init() + self.ctrl_obj["mobile_ctrl"].init() + self.ctrl_obj["servo_ctrl"].init() + self.ctrl_obj["ir_distance_sensor_ctrl"].init() + self.ctrl_obj["tools"].init() + + self.ctrl_obj["robot_ctrl"].enable_sdk_mode() + self.ctrl_obj["robot_ctrl"].set_mode(rm_define.robot_mode_gimbal_follow) + self.ctrl_obj["chassis_ctrl"].stop() + self.ctrl_obj["tools"].program_timer_start() + + self.ctrl_obj["AI_ctrl"].sdk_info_push_callback_register( + self.AI_info_push_callback + ) + self.ctrl_obj["armor_ctrl"].sdk_event_push_callback_register( + self.armor_event_push_callback + ) + self.ctrl_obj["media_ctrl"].sdk_event_push_callback_register( + self.applause_event_push_callback + ) + self.ctrl_obj["chassis_ctrl"].sdk_info_push_callback_register( + self.chassis_info_push_callback + ) + 
self.ctrl_obj["gimbal_ctrl"].sdk_info_push_callback_register( + self.gimbal_info_push_callback + ) + self.ctrl_obj["sensor_adapter_ctrl"].sdk_event_push_callback_register( + self.io_level_event_push_callback + ) + self.ctrl_obj["sdk_ctrl"].sdk_info_push_callback_register( + self.youth_competition_msg_push_callback + ) + + def stop(): + self.ctrl_obj["blaster_ctrl"].stop() + self.ctrl_obj["chassis_ctrl"].stop() + self.ctrl_obj["gimbal_ctrl"].stop() + self.ctrl_obj["media_ctrl"].stop() + self.ctrl_obj["AI_ctrl"].stop() + self.ctrl_obj["armor_ctrl"].stop() + + def exit(): + stop() + self.ctrl_obj["robot_ctrl"].disable_sdk_mode() + self.ctrl_obj["robot_ctrl"].exit() + self.ctrl_obj["gimbal_ctrl"].exit() + self.ctrl_obj["chassis_ctrl"].exit() + self.ctrl_obj["blaster_ctrl"].exit() + self.ctrl_obj["mobile_ctrl"].exit() + self.ctrl_obj["armor_ctrl"].exit() + self.ctrl_obj["media_ctrl"].exit() + self.ctrl_obj["sdk_ctrl"].exit() + self.ctrl_obj["ir_distance_sensor_ctrl"].exit() + self.ctrl_obj["sensor_adapter_ctrl"].exit() + self.ctrl_obj["servo_ctrl"].exit() + self.ctrl_obj["gripper_ctrl"].exit() + self.ctrl_obj["event"].stop() + self.ctrl_obj.clear() + + if ctrl == "init": + init() + elif ctrl == "ready": + ready() + elif ctrl == "stop": + stop() + elif ctrl == "exit": + exit() + + def __data_process(self): + self.sdk_robot_ctrl("init") + self.sdk_robot_ctrl("ready") + + while self.sdk_mode: + result = False + try: + fd, data = self.data_queue.get(timeout=1) + except queue.Empty: + continue + self.command_execing_start() + if data.req_type == "set": + cmd = str(data.obj) + "." + str(data.function) + str(data.param) + + logger.info(cmd) + + try: + result = eval(cmd, self.ctrl_obj) + + except Exception as e: + logger.fatal(traceback.format_exc()) + self.ack(fd, "fail", data.seq) + continue + if ( + (type(result) == tuple and result[-1] is 0) + or (type(result) == bool and result == True) + or result == None + or result is 0 + ): + self.ack(fd, "ok", data.seq) + else: + self.ack(fd, "fail", data.seq) + logger.fatal( + "process : " + + str(data.obj) + + "." + + str(data.function) + + str(data.param) + + " exec_result:" + + str(result) + ) + elif data.req_type == "get": + if data.param == None: + cmd = str(data.obj) + "." + str(data.function) + "()" + else: + cmd = str(data.obj) + "." 
+ str(data.function) + str(data.param) + + logger.info(cmd) + + try: + result = eval(cmd, self.ctrl_obj) + + except Exception as e: + logger.fatal(traceback.format_exc()) + self.ack(fd, "fail", data.seq) + seq = data.seq + data = "" + if type(result) == tuple or type(result) == list: + for i in result: + if type(i) == float: + data = data + "%.3f" % i + " " + else: + data = data + str(i) + " " + else: + data = str(result) + " " + self.ack(fd, data, seq) + else: + time.sleep(0.05) + self.command_execing_finish() + + self.sdk_robot_ctrl("exit") + + def protocal_parser(self, fd, data, mode=None): + # command + logger.info("Recv string: %s" % (data)) + command = data.split(" ") + + if len(command) == 0: + return + + # find 'seq' + seq = None + if "seq" in command: + seq_pos = command.index("seq") + if len(command) > seq_pos + 1: + seq = command[seq_pos + 1] + if seq.isdigit(): + seq = int(seq) + elif re.match(r"^0x[0-9a-fA-F]+$", seq): + seq = int(seq, 16) + else: + self.ack(fd, "command format error: seq parse error") + else: + self.ack(fd, "command format error: no seq value") + command = command[0:seq_pos] + + if self.command_execing_is_finish(): + self.ack(fd, "error", seq) + return False + + # check protocal format + command_obj = command[0] + + # call process function + if command_obj in self.command_parser_callback.keys(): + result = self.command_parser_callback[command_obj](mode, fd, seq) + if result == False or result == None: + self.ack(fd, "%s exec error" % command_obj, seq) + elif result == True: + self.ack(fd, "ok", seq) + else: + self.ack(fd, result, seq) + else: + if not self.sdk_mode: + self.ack(fd, "not in sdk mode", seq) + return False + result = self.ctrl_protocal_format_parser(command, seq) + if result == False or result == None: + self.ack(fd, "command format error: command parse error", seq) + else: + if not self.data_queue.full(): + try: + self.data_queue.put_nowait((fd, result)) + except Exception as e: + # full ? 
+ logger.fatal(e) + + def command_protocal_format_parser(self, mode, fd, seq): + if self.sdk_mode == False: + self.sdk_mode = True + if ( + self.data_process_thread == None + or self.data_process_thread.is_alive() == False + ): + self.data_process_thread = threading.Thread(target=self.__data_process) + self.data_process_thread.start() + + if ( + self.report_local_host_ip_timer + and self.report_local_host_ip_timer.is_start() + ): + self.report_local_host_ip_timer.join() + self.report_local_host_ip_timer.stop() + + if mode == self.UART: + self.connection_obj = self.uart_obj + self.uart_data_t = "" + elif mode == self.NETWORK: + self.connection_obj = self.socket_obj + self.socket_data_t = "" + + return True + else: + return "Already in SDK mode" + + def version_protocal_format_parser(self, mode, fd, seq): + if "version" in self.config.keys(): + return "version " + self.config["version"] + + def quit_protocal_format_parser(self, mode, fd, seq): + if self.data_process_thread and self.data_process_thread.is_alive(): + if self.report_local_host_ip_timer == None: + self.report_local_host_ip_timer = tools.get_timer( + 2, self.connection_obj.report_local_host_ip + ) + self.report_local_host_ip_timer.start() + else: + self.report_local_host_ip_timer.start() + self.sdk_mode = False + self.data_process_thread.join() + self.ack(fd, "ok", seq) + if mode: + self.connection_obj = None + self.socket_data_t = "" + self.uart_data_t = "" + return True + else: + self.ack(fd, "quit sdk mode failed", seq) + if mode: + self.connection_obj = None + return False + + def ctrl_protocal_format_parser(self, command, seq): + cmdpkg = CommandPackage() + cmdpkg.seq = seq + + try: + # get object + obj = command[0] + if obj in self.protocal_mapping_table.keys(): + cmdpkg.obj = self.protocal_mapping_table[obj]["obj"] + else: + logger.error("obj parse error") + return False + + # get function key + function = command[1] + if function in self.protocal_mapping_table[obj]["functions"].keys(): + function_dict = self.protocal_mapping_table[obj]["functions"][function] + + # check if get command + if "?" in command: + params_list = command[2:] + if "?" 
in params_list: + params_list.remove("?") + cmdpkg.function = function_dict["get"][0] + cmdpkg.req_type = "get" + params = [] + + """ + if len(function_dict['get'][1:]) != 0 and len(params_list) != 0: + cmdpkg.param = tuple(params_list[0:len(function_dict['get'][1:])]) + """ + + for param in function_dict["get"][1:]: + # handle the first param is status bit + if len(function_dict["get"][1:]) == 1: + value = None + if len(params_list) == 0: + value = None + elif len(params_list) == 1: + value = params_list[0] + elif params_list[0] == function_dict["get"][1:][0]: + value = params_list[1] + if value and value.isdigit(): + value = int(value) + elif re.match(r"^0x[0-9a-fA-F]+$", value): + value = int(value, 16) + elif value == "True" or value == "true": + value = True + elif value == "False" or value == "false": + value = False + else: + try: + value = float(value) + except Exception as e: + pass + params.append(value) + break + + # check params + if param in params_list and params_list.index(param) + 1 < len( + params_list + ): + value = params_list[params_list.index(param) + 1] + if value and value.isdigit(): + value = int(value) + elif re.match(r"^0x[0-9a-fA-F]+$", value): + value = int(value, 16) + elif value == "True" or value == "true": + value = True + elif value == "False" or value == "false": + value = False + else: + try: + value = float(value) + except Exception as e: + pass + params.append(value) + else: + params.append(None) + + cmdpkg.param = tuple(params) + logger.info(cmdpkg.param) + + # set command + else: + # get params list + params_list = command[2:] + cmdpkg.function = function_dict["set"][0] + cmdpkg.req_type = "set" + params = [] + + for param in function_dict["set"][1:]: + # handle the first param is status bit + if len(function_dict["set"][1:]) == 1: + value = None + if len(params_list) == 0: + value = None + elif len(params_list) == 1: + value = params_list[0] + elif len(params_list) == 2: + value = params_list[1] + if value and value.isdigit(): + value = int(value) + elif value and re.match(r"^0x[0-9a-fA-F]+$", value): + value = int(value, 16) + elif value == "True" or value == "true": + value = True + elif value == "False" or value == "false": + value = False + else: + try: + value = float(value) + except Exception as e: + pass + params.append(value) + break + + # check params + if param in params_list and params_list.index(param) + 1 < len( + params_list + ): + value = params_list[params_list.index(param) + 1] + if value.isdigit(): + value = int(value) + elif re.match(r"^0x[0-9a-fA-F]+$", value): + value = int(value, 16) + elif value == "True" or value == "true": + value = True + elif value == "False" or value == "false": + value = False + else: + try: + value = float(value) + except Exception as e: + pass + params.append(value) + else: + params.append(None) + + cmdpkg.param = tuple(params) + logger.info(cmdpkg.param) + else: + logger.error("function key parse error") + return False + except Exception as e: + logger.fatal(traceback.format_exc()) + return False + + return cmdpkg + + def connection_status_report(self, status, fd, data): + logger.info( + "connect status changed, local host ip info : %s remote host ip info: %s, cur status: %s" + % ( + self.socket_obj.get_local_host_ip(data), + self.socket_obj.get_remote_host_ip(data), + status, + ) + ) + mode = "wifi" + if data != None: + ip = self.socket_obj.get_local_host_ip(data) + if ip == tools.get_ip_by_dev_name("wlan0"): + mode = "wifi" + elif ip == tools.get_ip_by_dev_name("rndis0"): + mode = "rndis" + 
logger.info("connect mode: %s" % (mode)) + + if status == "connected": + self.sdk_ctrl.sdk_on(mode) + elif status == "disconnected": + self.sdk_ctrl.sdk_off() + + def armor_event_push_callback(self, event): + if len(event) == 0: + return + + msg = "armor event" + if "hit" in event.keys(): + msg += " hit %d %d ;" % (event["hit"]) + self.send("event", msg) + + def applause_event_push_callback(self, event): + if len(event) == 0: + return + + msg = "sound event" + if "applause" in event.keys(): + msg += " applause %d ;" % (event["applause"]) + self.send("event", msg) + + def io_level_event_push_callback(self, event): + if len(event) == 0: + return + + msg = "sensor_adapter event" + if "io_level" in event.keys(): + msg += " io_level %d ;" % (event["io_level"]) + self.send("event", msg) + + def chassis_position_info_push_callback(self, x, y): + pass + + def chassis_info_push_callback(self, info): + if len(info) == 0: + return + + msg = "chassis push" + if "position" in info.keys(): + msg += " position %.3f %.3f ;" % (info["position"]) + if "attitude" in info.keys(): + msg += " attitude %.3f %.3f %.3f ;" % (info["attitude"]) + if "status" in info.keys(): + msg += " status %d %d %d %d %d %d %d %d %d %d %d ;" % (info["status"]) + self.send("push", msg) + + def gimbal_info_push_callback(self, info): + if len(info) == 0: + return + + msg = "gimbal push" + if "attitude" in info.keys(): + msg += " attitude %.3f %.3f ;" % (info["attitude"]) + self.send("push", msg) + + def AI_info_push_callback(self, info): + if len(info) == 0: + return + msg = "AI push" + if "people" in info.keys(): + msg += " people %d" % len(info["people"]) + for i in info["people"]: + msg += " %.3f %.3f %.3f %.3f" % (i.pos.x, i.pos.y, i.size.w, i.size.h) + if "pose" in info.keys(): + msg += " pose %d" % len(info["pose"]) + for i in info["pose"]: + msg += " %d %.3f %.3f %.3f %.3f" % ( + i.info, + i.pos.x, + i.pos.y, + i.size.w, + i.size.h, + ) + if "marker" in info.keys(): + msg += " marker %d" % len(info["marker"]) + for i in info["marker"]: + msg += " %d %.3f %.3f %.3f %.3f" % ( + i.info, + i.pos.x, + i.pos.y, + i.size.w, + i.size.h, + ) + if "line" in info.keys(): + msg += " line %d" % int(len(info["line"]) / 10) + for i in info["line"]: + msg += " %.3f %.3f %.3f %.3f" % (i.pos.x, i.pos.y, i.size.w, i.size.h) + if "robot" in info.keys(): + msg += " robot %d" % len(info["robot"]) + for i in info["robot"]: + msg += " %.3f %.3f %.3f %.3f" % (i.pos.x, i.pos.y, i.size.w, i.size.h) + + self.send("push", msg) + + def gimbal_status_info_push_callback(self): + pass + + def youth_competition_msg_push_callback(self, info): + if len(info) == 0: + logger.error("SYS_GAME : msg is none") + return + msg = "game msg push " + if "data" in info["game_msg"].keys(): + msg += str(info["game_msg"]["data"]) + self.send("push", msg) + + def ack(self, fd, data, seq=None): + msg = data + if seq != None: + msg += " seq %s" % (str(seq)) + + msg += ";" + + if self.connection_obj: + self.connection_obj.send(fd, msg) + + def req(self): + pass + + def send(self, obj, data): + fd = None + + data += ";" + + if self.connection_obj == self.uart_obj: + self.connection_obj.send(None, data) + else: + if obj == "command": + if self.connection_obj: + return self.connection_obj.send(self.command_socket_fd, data) + else: + return None + elif obj == "event": + logger.info(self.connection_socket_fd) + for user_fd in self.connection_socket_fd[self.event_socket_fd]: + if self.connection_obj: + self.connection_obj.send(user_fd, data) + return 0 + elif obj == "push": + for ip in 
self.remote_host_ip: + if self.connection_obj: + self.connection_obj.send( + self.push_socket_fd, data, (ip, PUSH_PORT) + ) + return 0 + else: + return None + + def recv(self): + pass + + +class CommandPackage(object): + def __init__(self): + self.obj = None + self.function = None + self.param = None + self.seq = None + self.req_type = None diff --git a/dora-robomaster/s1_SDK/dji_scratch/sdk/sdk_manager.py b/dora-robomaster/s1_SDK/dji_scratch/sdk/sdk_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..696c39defe8ca9db139cef2ea72555fc1b9b126f --- /dev/null +++ b/dora-robomaster/s1_SDK/dji_scratch/sdk/sdk_manager.py @@ -0,0 +1,29 @@ +import os +import plaintext_sdk + + +class SDKManager(object): + def __init__(self, event_client, socket_obj, uart_obj): + self.plaintext_sdk = plaintext_sdk.PlaintextSDK( + event_client, socket_obj, uart_obj + ) + self.plaintext_sdk_config = {} + self.load_cfg() + + def load_cfg(self): + # load version + cur_dir = os.path.dirname(__file__) + f = open(cur_dir + "/version.txt") + version_ori = f.readlines() + f.close() + + version = "" + for i in version_ori: + version = version + "%.2d." % int(i.split(" ")[-1][0:-1]) + + version = version[0:-1] + + self.plaintext_sdk_config["version"] = version + + def enable_plaintext_sdk(self): + self.plaintext_sdk.init(self.plaintext_sdk_config) diff --git a/dora-robomaster/s1_SDK/dji_scratch/sdk/version.txt b/dora-robomaster/s1_SDK/dji_scratch/sdk/version.txt new file mode 100644 index 0000000000000000000000000000000000000000..24c03d2a67632b8e6627dc459b4d8b39b976be52 --- /dev/null +++ b/dora-robomaster/s1_SDK/dji_scratch/sdk/version.txt @@ -0,0 +1,4 @@ +#SDK_VERSION_MAJOR 0 +#SDK_VERSION_MINOR 0 +#SDK_VERSION_REVISION 00 +#SDK_VERSION_BUILD 70 diff --git a/dora-robomaster/s1_SDK/patch.sh b/dora-robomaster/s1_SDK/patch.sh new file mode 100644 index 0000000000000000000000000000000000000000..aaddd7749b3ac7925b036d4cf497912340c1fcff --- /dev/null +++ b/dora-robomaster/s1_SDK/patch.sh @@ -0,0 +1,22 @@ +#!/system/bin/sh + +# Run adbd for convenience. Not required to get SDK support. +/system/bin/adb_en.sh & + +# Stop affected services. +stop dji_sys +stop dji_hdvt_uav +stop dji_vision + +# Overwrite S1's dji.json with EP's one. Use a bind mount as the file is in the +# system partition. +mount -o bind /data/dji.json /system/etc/dji.json + +# This allows accessing the robot with DJI's binary protocol on port 20020. +mount -o bind /data/dji_hdvt_uav /system/bin/dji_hdvt_uav + +# Restart services. +start dji_sys +start dji_hdvt_uav +start dji_vision + diff --git a/dora-robomaster/s1_SDK/upload.sh b/dora-robomaster/s1_SDK/upload.sh new file mode 100644 index 0000000000000000000000000000000000000000..565f7d3e6addbf36ce92f7ce573cb0048620107c --- /dev/null +++ b/dora-robomaster/s1_SDK/upload.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +adb shell rm -rf /data/dji_scratch/sdk +adb push dji_scratch/sdk /data/dji_scratch/. + +adb push dji_scratch/bin/dji_scratch.py /data/dji_scratch/bin/. + +adb push dji.json /data/. + +adb push dji_hdvt_uav /data/. +adb shell chmod 755 /data/dji_hdvt_uav + +adb push patch.sh /data/. 
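+
+# Note (assumption, not part of the original script): after uploading, patch.sh
+# still has to be executed on the robot itself, e.g. something like
+#   adb shell sh /data/patch.sh
+# The exact invocation may differ depending on your setup.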
+ diff --git a/examples/benchmark/dataflow.yml b/examples/benchmark/dataflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..b24a4b7b60df4db3da87253f5a834a2ef22a44b6 --- /dev/null +++ b/examples/benchmark/dataflow.yml @@ -0,0 +1,15 @@ +nodes: + - id: rust-node + custom: + build: cargo build -p benchmark-example-node --release + source: ../../target/release/benchmark-example-node + outputs: + - latency + - throughput + - id: rust-sink + custom: + build: cargo build -p benchmark-example-sink --release + source: ../../target/release/benchmark-example-sink + inputs: + latency: rust-node/latency + throughput: rust-node/throughput diff --git a/examples/benchmark/node/Cargo.toml b/examples/benchmark/node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..ccc55c4adfcc71b4a452c8dcf683e6347949fac6 --- /dev/null +++ b/examples/benchmark/node/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "benchmark-example-node" +version.workspace = true +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +dora-node-api = { workspace = true } +eyre = "0.6.8" +futures = "0.3.21" +rand = "0.8.5" +tokio = { version = "1.20.1", features = ["rt", "macros"] } +tracing = "0.1.36" +tracing-subscriber = "0.3.15" diff --git a/examples/benchmark/node/src/main.rs b/examples/benchmark/node/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..9080bbc559fcdd0ac5ddc0b81a9e25c6362ed2cc --- /dev/null +++ b/examples/benchmark/node/src/main.rs @@ -0,0 +1,71 @@ +use dora_node_api::{self, dora_core::config::DataId, DoraNode}; +use eyre::Context; +use rand::Rng; +use std::time::Duration; +use tracing_subscriber::Layer; + +fn main() -> eyre::Result<()> { + set_up_tracing().wrap_err("failed to set up tracing subscriber")?; + + let latency = DataId::from("latency".to_owned()); + let throughput = DataId::from("throughput".to_owned()); + + let (mut node, _events) = DoraNode::init_from_env()?; + let sizes = [ + 0, + 8, + 64, + 512, + 2048, + 4096, + 4 * 4096, + 10 * 4096, + 100 * 4096, + 1000 * 4096, + ]; + + // test latency first + for size in sizes { + for _ in 0..100 { + let data: Vec = rand::thread_rng() + .sample_iter(rand::distributions::Standard) + .take(size) + .collect(); + node.send_output_raw(latency.clone(), Default::default(), data.len(), |out| { + out.copy_from_slice(&data); + })?; + + // sleep a bit to avoid queue buildup + std::thread::sleep(Duration::from_millis(10)); + } + } + + // wait a bit to ensure that all throughput messages reached their target + std::thread::sleep(Duration::from_secs(2)); + + // then throughput with full speed + for size in sizes { + for _ in 0..100 { + let data: Vec = rand::thread_rng() + .sample_iter(rand::distributions::Standard) + .take(size) + .collect(); + node.send_output_raw(throughput.clone(), Default::default(), data.len(), |out| { + out.copy_from_slice(&data); + })?; + } + } + + Ok(()) +} + +fn set_up_tracing() -> eyre::Result<()> { + use tracing_subscriber::prelude::__tracing_subscriber_SubscriberExt; + + let stdout_log = tracing_subscriber::fmt::layer() + .pretty() + .with_filter(tracing::metadata::LevelFilter::DEBUG); + let subscriber = tracing_subscriber::Registry::default().with(stdout_log); + tracing::subscriber::set_global_default(subscriber) + .context("failed to set tracing global subscriber") +} diff --git a/examples/benchmark/run.rs b/examples/benchmark/run.rs new file mode 100644 index 
0000000000000000000000000000000000000000..78e6d88b526ab37aa12ec6c7e8d83f77bfee4f62 --- /dev/null +++ b/examples/benchmark/run.rs @@ -0,0 +1,46 @@ +use dora_tracing::set_up_tracing; +use eyre::{bail, Context}; +use std::path::Path; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("benchmark-runner").wrap_err("failed to set up tracing subscriber")?; + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + let dataflow = Path::new("dataflow.yml"); + build_dataflow(dataflow).await?; + + run_dataflow(dataflow).await?; + + Ok(()) +} + +async fn build_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--").arg("build").arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to build dataflow"); + }; + Ok(()) +} + +async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--run-dataflow") + .arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} diff --git a/examples/benchmark/sink/Cargo.toml b/examples/benchmark/sink/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..940361ac3afc8aba9dfd24abb0ba4d16bc2dfcee --- /dev/null +++ b/examples/benchmark/sink/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "benchmark-example-sink" +version.workspace = true +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +dora-node-api = { workspace = true } +eyre = "0.6.8" +tracing = "0.1.36" +tracing-subscriber = "0.3.15" diff --git a/examples/benchmark/sink/src/main.rs b/examples/benchmark/sink/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..154b47d246af3f926a4e43016bc760e6e4014f51 --- /dev/null +++ b/examples/benchmark/sink/src/main.rs @@ -0,0 +1,98 @@ +use dora_node_api::{self, DoraNode, Event}; +use eyre::Context; +use std::time::{Duration, Instant}; +use tracing_subscriber::Layer; + +fn main() -> eyre::Result<()> { + set_up_tracing().wrap_err("failed to set up tracing subscriber")?; + + let (_node, mut events) = DoraNode::init_from_env()?; + + // latency is tested first + let mut latency = true; + + let mut current_size = 0; + let mut n = 0; + let mut start = Instant::now(); + let mut latencies = Vec::new(); + + println!("Latency:"); + + while let Some(event) = events.recv() { + match event { + Event::Input { id, metadata, data } => { + // check if new size bracket + let data_len = data.len(); + if data_len != current_size { + if n > 0 { + record_results(start, current_size, n, latencies, latency); + } + current_size = data_len; + n = 0; + start = Instant::now(); + latencies = Vec::new(); + } + + match id.as_str() { + "latency" if latency => {} + "throughput" if latency => { + latency = false; + println!("Throughput:"); + } + "throughput" => {} + other => { + eprintln!("Ignoring unexpected input `{other}`"); + continue; + } + } + + n += 1; + latencies.push( + metadata + .timestamp() + .get_time() + .to_system_time() + .elapsed() + .unwrap_or_default(), + ); + } + Event::InputClosed { id } => { + println!("Input 
`{id}` was closed"); + } + other => eprintln!("Received unexpected input: {other:?}"), + } + } + + record_results(start, current_size, n, latencies, latency); + + Ok(()) +} + +fn record_results( + start: Instant, + current_size: usize, + n: u32, + latencies: Vec, + latency: bool, +) { + let msg = if latency { + let avg_latency = latencies.iter().sum::() / n; + format!("size {current_size:<#8x}: {avg_latency:?}") + } else { + let duration = start.elapsed(); + let msg_per_sec = n as f64 / duration.as_secs_f64(); + format!("size {current_size:<#8x}: {msg_per_sec:.0} messages per second") + }; + println!("{msg}"); +} + +fn set_up_tracing() -> eyre::Result<()> { + use tracing_subscriber::prelude::__tracing_subscriber_SubscriberExt; + + let stdout_log = tracing_subscriber::fmt::layer() + .pretty() + .with_filter(tracing::metadata::LevelFilter::DEBUG); + let subscriber = tracing_subscriber::Registry::default().with(stdout_log); + tracing::subscriber::set_global_default(subscriber) + .context("failed to set tracing global subscriber") +} diff --git a/examples/c++-dataflow/.gitignore b/examples/c++-dataflow/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..5761abcfdf0c26a75374c945dfe366eaeee04285 --- /dev/null +++ b/examples/c++-dataflow/.gitignore @@ -0,0 +1 @@ +*.o diff --git a/examples/c++-dataflow/README.md b/examples/c++-dataflow/README.md new file mode 100644 index 0000000000000000000000000000000000000000..100ac99e94d5f48eab87956db8729dc90d9b48e6 --- /dev/null +++ b/examples/c++-dataflow/README.md @@ -0,0 +1,57 @@ +# Dora C++ Dataflow Example + +This example shows how to create dora operators and custom nodes with C++. + +Dora does not provide a C++ API yet, but we can create adapters for either the C or Rust API. The `operator-rust-api` and `node-rust-api` folders implement an example operator and node based on dora's Rust API, using the `cxx` crate for bridging. The `operator-c-api` and `node-c-api` show how to create operators and nodes based on dora's C API. Both approaches work, so you can choose the API that fits your application better. + +## Compile and Run + +To try it out, you can use the [`run.rs`](./run.rs) binary. It performs all required build steps and then starts the dataflow. Use the following command to run it: `cargo run --example cxx-dataflow`. + +For a manual build, follow these steps: + +- Create a `build` folder in this directory (i.e., next to the `node.c` file) +- Build the `cxx-dataflow-example-node-rust-api` and `cxx-dataflow-example-operator-rust-api` crates: + ``` + cargo build -p cxx-dataflow-example-node-rust-api --release + cargo build -p cxx-dataflow-example-operator-rust-api --release + ``` +- Compile the `dora-node-api-c` crate into a static library. + - Run `cargo build -p dora-node-api-c --release` + - The resulting staticlib is then available under `../../target/release/libdora-node-api-c.a`. +- Compile the `node-c-api/main.cc` (e.g. using `clang++`) and link the staticlib + - For example, use the following command: + ``` + clang++ node-c-api/main.cc -std=c++14 -ldora_node_api_c -L ../../target/release --output build/node_c_api + ``` + - The `` depend on the operating system and the libraries that the C node uses. 
The following flags are required for each OS: + - Linux: `-lm -lrt -ldl -pthread` + - macOS: `-framework CoreServices -framework Security -l System -l resolv -l pthread -l c -l m` + - Windows: + ``` + -ladvapi32 -luserenv -lkernel32 -lws2_32 -lbcrypt -lncrypt -lschannel -lntdll -liphlpapi + -lcfgmgr32 -lcredui -lcrypt32 -lcryptnet -lfwpuclnt -lgdi32 -lmsimg32 -lmswsock -lole32 + -lopengl32 -lsecur32 -lshell32 -lsynchronization -luser32 -lwinspool + -Wl,-nodefaultlib:libcmt -D_DLL -lmsvcrt + ``` + Also: On Windows, the output file should have an `.exe` extension: `--output build/c_node.exe` +- Compile the `operator-c-api/operator.cc` file into a shared library. + - For example, use the following commands: + ``` + clang++ -c operator-c-api/operator.cc -std=c++14 -o build/operator_c_api.o -fPIC + clang++ -shared build/operator_c_api.o -o build/liboperator_c_api.so + ``` + Omit the `-fPIC` argument on Windows. Replace the `liboperator_c_api.so` name with the shared library standard library prefix/extensions used on your OS, e.g. `.dll` on Windows. + +**Build the dora coordinator and runtime:** + +- Build the `dora-coordinator` executable using `cargo build -p dora-coordinator --release` +- Build the `dora-runtime` executable using `cargo build -p dora-runtime --release` + +**Run the dataflow:** + +- Start the `dora-coordinator`, passing the paths to the dataflow file and the `dora-runtime` as arguments: + + ``` + ../../target/release/dora-daemon --run-dataflow dataflow.yml ../../target/release/dora-runtime + ``` diff --git a/examples/c++-dataflow/dataflow.yml b/examples/c++-dataflow/dataflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..d2186dd890d7093c9c81d62d843f9e23e9162ea6 --- /dev/null +++ b/examples/c++-dataflow/dataflow.yml @@ -0,0 +1,33 @@ +nodes: + - id: cxx-node-rust-api + custom: + source: build/node_rust_api + inputs: + tick: dora/timer/millis/300 + outputs: + - counter + - id: cxx-node-c-api + custom: + source: build/node_c_api + inputs: + tick: cxx-node-rust-api/counter + outputs: + - counter + + - id: runtime-node-1 + operators: + - id: operator-rust-api + shared-library: build/operator_rust_api + inputs: + counter_1: cxx-node-c-api/counter + counter_2: cxx-node-rust-api/counter + outputs: + - status + - id: runtime-node-2 + operators: + - id: operator-c-api + shared-library: build/operator_c_api + inputs: + op_status: runtime-node-1/operator-rust-api/status + outputs: + - half-status diff --git a/examples/c++-dataflow/node-c-api/main.cc b/examples/c++-dataflow/node-c-api/main.cc new file mode 100644 index 0000000000000000000000000000000000000000..8148bf197e52697b57b5b772f0842128c8a5cab0 --- /dev/null +++ b/examples/c++-dataflow/node-c-api/main.cc @@ -0,0 +1,85 @@ +extern "C" +{ +#include "../../../apis/c/node/node_api.h" +} + +#include +#include + +int run(void *dora_context) +{ + unsigned char counter = 0; + + for (int i = 0; i < 20; i++) + { + void *event = dora_next_event(dora_context); + if (event == NULL) + { + printf("[c node] ERROR: unexpected end of event\n"); + return -1; + } + + enum DoraEventType ty = read_dora_event_type(event); + + if (ty == DoraEventType_Input) + { + counter += 1; + + char *id_ptr; + size_t id_len; + read_dora_input_id(event, &id_ptr, &id_len); + std::string id(id_ptr, id_len); + + char *data_ptr; + size_t data_len; + read_dora_input_data(event, &data_ptr, &data_len); + std::vector data; + for (size_t i = 0; i < data_len; i++) + { + data.push_back(*(data_ptr + i)); + } + + std::cout + << "Received input " + << " 
(counter: " << (unsigned int)counter << ") data: ["; + for (unsigned char &v : data) + { + std::cout << (unsigned int)v << ", "; + } + std::cout << "]" << std::endl; + + std::vector out_vec{counter}; + std::string out_id = "counter"; + int result = dora_send_output(dora_context, &out_id[0], out_id.length(), (char *)&counter, 1); + if (result != 0) + { + std::cerr << "failed to send output" << std::endl; + return 1; + } + } + else if (ty == DoraEventType_Stop) + { + printf("[c node] received stop event\n"); + } + else + { + printf("[c node] received unexpected event: %d\n", ty); + } + + free_dora_event(event); + } + return 0; +} + +int main() +{ + std::cout << "HELLO FROM C++ (using C API)" << std::endl; + + auto dora_context = init_dora_context_from_env(); + auto ret = run(dora_context); + free_dora_context(dora_context); + + std::cout << "GOODBYE FROM C++ node (using C API)" << std::endl; + + return ret; +} diff --git a/examples/c++-dataflow/node-rust-api/main.cc b/examples/c++-dataflow/node-rust-api/main.cc new file mode 100644 index 0000000000000000000000000000000000000000..9407650a664122da68fef6e0e991a99ca0ece66d --- /dev/null +++ b/examples/c++-dataflow/node-rust-api/main.cc @@ -0,0 +1,50 @@ +#include "../build/dora-node-api.h" + +#include +#include + +int main() +{ + std::cout << "HELLO FROM C++" << std::endl; + unsigned char counter = 0; + + auto dora_node = init_dora_node(); + + for (int i = 0; i < 20; i++) + { + + auto event = dora_node.events->next(); + auto ty = event_type(event); + + if (ty == DoraEventType::AllInputsClosed) + { + break; + } + else if (ty == DoraEventType::Input) + { + auto input = event_as_input(std::move(event)); + + counter += 1; + + std::cout << "Received input " << std::string(input.id) << " (counter: " << (unsigned int)counter << ")" << std::endl; + + std::vector out_vec{counter}; + rust::Slice out_slice{out_vec.data(), out_vec.size()}; + auto result = send_output(dora_node.send_output, "counter", out_slice); + auto error = std::string(result.error); + if (!error.empty()) + { + std::cerr << "Error: " << error << std::endl; + return -1; + } + } + else + { + std::cerr << "Unknown event type " << static_cast(ty) << std::endl; + } + } + + std::cout << "GOODBYE FROM C++ node (using Rust API)" << std::endl; + + return 0; +} diff --git a/examples/c++-dataflow/operator-c-api/operator.cc b/examples/c++-dataflow/operator-c-api/operator.cc new file mode 100644 index 0000000000000000000000000000000000000000..f15492dda6082a5f747d81207afe95a60c254ecd --- /dev/null +++ b/examples/c++-dataflow/operator-c-api/operator.cc @@ -0,0 +1,79 @@ +extern "C" +{ +#include "../../../apis/c/operator/operator_api.h" +} + +#include +#include +#include +#include +#include + +class Operator +{ +public: + Operator(); +}; + +Operator::Operator() {} + +extern "C" DoraInitResult_t dora_init_operator() +{ + Operator *op = std::make_unique().release(); + + DoraInitResult_t result = {.operator_context = (void *)op}; + return result; +} + +extern "C" DoraResult_t dora_drop_operator(void *operator_context) +{ + delete (Operator *)operator_context; + return {}; +} + +extern "C" OnEventResult_t dora_on_event( + RawEvent_t *event, + const SendOutput_t *send_output, + void *operator_context) +{ + if (event->input != NULL) + { + // input event + Input_t *input = event->input; + char *id = dora_read_input_id(input); + + Vec_uint8_t data = dora_read_data(input); + assert(data.ptr != NULL); + + std::cout + << "C++ Operator (C-API) received input `" << id << "` with data: ["; + for (int i = 0; i < 
data.len; i++) + { + std::cout << (unsigned int)data.ptr[i] << ", "; + } + std::cout << "]" << std::endl; + + const char *out_id = "half-status"; + char *out_id_heap = strdup(out_id); + + size_t out_data_len = 1; + uint8_t *out_data_heap = (uint8_t *)malloc(out_data_len); + *out_data_heap = *data.ptr / 2; + + DoraResult_t send_result = dora_send_operator_output(send_output, out_id_heap, out_data_heap, out_data_len); + + OnEventResult_t result = {.result = send_result, .status = DORA_STATUS_CONTINUE}; + + dora_free_data(data); + dora_free_input_id(id); + + return result; + } + if (event->stop) + { + printf("C operator received stop event\n"); + } + + OnEventResult_t result = {.status = DORA_STATUS_CONTINUE}; + return result; +} diff --git a/examples/c++-dataflow/operator-rust-api/operator.cc b/examples/c++-dataflow/operator-rust-api/operator.cc new file mode 100644 index 0000000000000000000000000000000000000000..2b8121883cc0cf33c7921bc3359bd4698d4855fd --- /dev/null +++ b/examples/c++-dataflow/operator-rust-api/operator.cc @@ -0,0 +1,23 @@ +#include "operator.h" +#include +#include +#include "../build/dora-operator-api.h" + +Operator::Operator() {} + +std::unique_ptr new_operator() +{ + return std::make_unique(); +} + +DoraOnInputResult on_input(Operator &op, rust::Str id, rust::Slice data, OutputSender &output_sender) +{ + op.counter += 1; + std::cout << "Rust API operator received input `" << id.data() << "` with data `" << (unsigned int)data[0] << "` (internal counter: " << (unsigned int)op.counter << ")" << std::endl; + + std::vector out_vec{op.counter}; + rust::Slice out_slice{out_vec.data(), out_vec.size()}; + auto send_result = send_output(output_sender, rust::Str("status"), out_slice); + DoraOnInputResult result = {send_result.error, false}; + return result; +} diff --git a/examples/c++-dataflow/operator-rust-api/operator.h b/examples/c++-dataflow/operator-rust-api/operator.h new file mode 100644 index 0000000000000000000000000000000000000000..9b5e3ab22a4b46f5c2e5784148f04213b2debbaf --- /dev/null +++ b/examples/c++-dataflow/operator-rust-api/operator.h @@ -0,0 +1,16 @@ +#pragma once +#include +#include "../../../apis/c/operator/operator_api.h" + +class Operator +{ +public: + Operator(); + unsigned char counter; +}; + +#include "../build/dora-operator-api.h" + +std::unique_ptr new_operator(); + +DoraOnInputResult on_input(Operator &op, rust::Str id, rust::Slice data, OutputSender &output_sender); diff --git a/examples/c++-dataflow/run.rs b/examples/c++-dataflow/run.rs new file mode 100644 index 0000000000000000000000000000000000000000..6f966e19151b6ad74f4f2f27ccd7e1dbced165f8 --- /dev/null +++ b/examples/c++-dataflow/run.rs @@ -0,0 +1,299 @@ +use dora_tracing::set_up_tracing; +use eyre::{bail, Context}; +use std::{ + env::consts::{DLL_PREFIX, DLL_SUFFIX, EXE_SUFFIX}, + path::Path, +}; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("c++-dataflow-runner").wrap_err("failed to set up tracing")?; + + if cfg!(windows) { + tracing::error!( + "The c++ example does not work on Windows currently because of a linker error" + ); + return Ok(()); + } + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + let target = root.join("target"); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + tokio::fs::create_dir_all("build").await?; + let build_dir = Path::new("build"); + + build_package("dora-node-api-cxx").await?; + let node_cxxbridge = target + .join("cxxbridge") + .join("dora-node-api-cxx") + .join("src"); 
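+    // Copy the cxxbridge-generated bridge source and header next to our C++
+    // sources so that node-rust-api/main.cc can include them from the build
+    // directory and the bridge code is compiled into the node binary below.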
+ tokio::fs::copy( + node_cxxbridge.join("lib.rs.cc"), + build_dir.join("node-bridge.cc"), + ) + .await?; + tokio::fs::copy( + node_cxxbridge.join("lib.rs.h"), + build_dir.join("dora-node-api.h"), + ) + .await?; + tokio::fs::write( + build_dir.join("operator.h"), + r###"#include "../operator-rust-api/operator.h""###, + ) + .await?; + + build_package("dora-operator-api-cxx").await?; + let operator_cxxbridge = target + .join("cxxbridge") + .join("dora-operator-api-cxx") + .join("src"); + tokio::fs::copy( + operator_cxxbridge.join("lib.rs.cc"), + build_dir.join("operator-bridge.cc"), + ) + .await?; + tokio::fs::copy( + operator_cxxbridge.join("lib.rs.h"), + build_dir.join("dora-operator-api.h"), + ) + .await?; + + build_package("dora-node-api-c").await?; + build_package("dora-operator-api-c").await?; + build_cxx_node( + root, + &[ + &dunce::canonicalize(Path::new("node-rust-api").join("main.cc"))?, + &dunce::canonicalize(build_dir.join("node-bridge.cc"))?, + ], + "node_rust_api", + &["-l", "dora_node_api_cxx"], + ) + .await?; + build_cxx_node( + root, + &[&dunce::canonicalize( + Path::new("node-c-api").join("main.cc"), + )?], + "node_c_api", + &["-l", "dora_node_api_c"], + ) + .await?; + build_cxx_operator( + &[ + &dunce::canonicalize(Path::new("operator-rust-api").join("operator.cc"))?, + &dunce::canonicalize(build_dir.join("operator-bridge.cc"))?, + ], + "operator_rust_api", + &[ + "-l", + "dora_operator_api_cxx", + "-L", + root.join("target").join("debug").to_str().unwrap(), + ], + ) + .await?; + build_cxx_operator( + &[&dunce::canonicalize( + Path::new("operator-c-api").join("operator.cc"), + )?], + "operator_c_api", + &[ + "-l", + "dora_operator_api_c", + "-L", + root.join("target").join("debug").to_str().unwrap(), + ], + ) + .await?; + + let dataflow = Path::new("dataflow.yml").to_owned(); + build_package("dora-runtime").await?; + run_dataflow(&dataflow).await?; + + Ok(()) +} + +async fn build_package(package: &str) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("build"); + cmd.arg("--package").arg(package); + if !cmd.status().await?.success() { + bail!("failed to build {package}"); + }; + Ok(()) +} + +async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--run-dataflow") + .arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} + +async fn build_cxx_node( + root: &Path, + paths: &[&Path], + out_name: &str, + args: &[&str], +) -> eyre::Result<()> { + let mut clang = tokio::process::Command::new("clang++"); + clang.args(paths); + clang.arg("-std=c++17"); + #[cfg(target_os = "linux")] + { + clang.arg("-l").arg("m"); + clang.arg("-l").arg("rt"); + clang.arg("-l").arg("dl"); + clang.arg("-pthread"); + } + #[cfg(target_os = "windows")] + { + clang.arg("-ladvapi32"); + clang.arg("-luserenv"); + clang.arg("-lkernel32"); + clang.arg("-lws2_32"); + clang.arg("-lbcrypt"); + clang.arg("-lncrypt"); + clang.arg("-lschannel"); + clang.arg("-lntdll"); + clang.arg("-liphlpapi"); + + clang.arg("-lcfgmgr32"); + clang.arg("-lcredui"); + clang.arg("-lcrypt32"); + clang.arg("-lcryptnet"); + clang.arg("-lfwpuclnt"); + clang.arg("-lgdi32"); + clang.arg("-lmsimg32"); + clang.arg("-lmswsock"); + clang.arg("-lole32"); + clang.arg("-lopengl32"); + clang.arg("-lsecur32"); + 
clang.arg("-lshell32"); + clang.arg("-lsynchronization"); + clang.arg("-luser32"); + clang.arg("-lwinspool"); + + clang.arg("-Wl,-nodefaultlib:libcmt"); + clang.arg("-D_DLL"); + clang.arg("-lmsvcrt"); + } + #[cfg(target_os = "macos")] + { + clang.arg("-framework").arg("CoreServices"); + clang.arg("-framework").arg("Security"); + clang.arg("-l").arg("System"); + clang.arg("-l").arg("resolv"); + clang.arg("-l").arg("pthread"); + clang.arg("-l").arg("c"); + clang.arg("-l").arg("m"); + } + clang.args(args); + clang.arg("-L").arg(root.join("target").join("debug")); + clang + .arg("--output") + .arg(Path::new("../build").join(format!("{out_name}{EXE_SUFFIX}"))); + if let Some(parent) = paths[0].parent() { + clang.current_dir(parent); + } + + if !clang.status().await?.success() { + bail!("failed to compile c++ node"); + }; + Ok(()) +} + +async fn build_cxx_operator( + paths: &[&Path], + out_name: &str, + link_args: &[&str], +) -> eyre::Result<()> { + let mut object_file_paths = Vec::new(); + + for path in paths { + let mut compile = tokio::process::Command::new("clang++"); + compile.arg("-c").arg(path); + compile.arg("-std=c++17"); + let object_file_path = path.with_extension("o"); + compile.arg("-o").arg(&object_file_path); + #[cfg(unix)] + compile.arg("-fPIC"); + if let Some(parent) = path.parent() { + compile.current_dir(parent); + } + if !compile.status().await?.success() { + bail!("failed to compile cxx operator"); + }; + object_file_paths.push(object_file_path); + } + + let mut link = tokio::process::Command::new("clang++"); + link.arg("-shared").args(&object_file_paths); + link.args(link_args); + #[cfg(target_os = "windows")] + { + link.arg("-ladvapi32"); + link.arg("-luserenv"); + link.arg("-lkernel32"); + link.arg("-lws2_32"); + link.arg("-lbcrypt"); + link.arg("-lncrypt"); + link.arg("-lschannel"); + link.arg("-lntdll"); + link.arg("-liphlpapi"); + + link.arg("-lcfgmgr32"); + link.arg("-lcredui"); + link.arg("-lcrypt32"); + link.arg("-lcryptnet"); + link.arg("-lfwpuclnt"); + link.arg("-lgdi32"); + link.arg("-lmsimg32"); + link.arg("-lmswsock"); + link.arg("-lole32"); + link.arg("-lopengl32"); + link.arg("-lsecur32"); + link.arg("-lshell32"); + link.arg("-lsynchronization"); + link.arg("-luser32"); + link.arg("-lwinspool"); + + link.arg("-Wl,-nodefaultlib:libcmt"); + link.arg("-D_DLL"); + link.arg("-lmsvcrt"); + link.arg("-fms-runtime-lib=static"); + } + #[cfg(target_os = "macos")] + { + link.arg("-framework").arg("CoreServices"); + link.arg("-framework").arg("Security"); + link.arg("-l").arg("System"); + link.arg("-l").arg("resolv"); + link.arg("-l").arg("pthread"); + link.arg("-l").arg("c"); + link.arg("-l").arg("m"); + } + link.arg("-o") + .arg(Path::new("../build").join(format!("{DLL_PREFIX}{out_name}{DLL_SUFFIX}"))); + if let Some(parent) = paths[0].parent() { + link.current_dir(parent); + } + if !link.status().await?.success() { + bail!("failed to create shared library from cxx operator (c api)"); + }; + + Ok(()) +} diff --git a/examples/c++-ros2-dataflow/.gitignore b/examples/c++-ros2-dataflow/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..5761abcfdf0c26a75374c945dfe366eaeee04285 --- /dev/null +++ b/examples/c++-ros2-dataflow/.gitignore @@ -0,0 +1 @@ +*.o diff --git a/examples/c++-ros2-dataflow/README.md b/examples/c++-ros2-dataflow/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f70b50d6520d912b6baaacec288b361ea603e1c5 --- /dev/null +++ b/examples/c++-ros2-dataflow/README.md @@ -0,0 +1,61 @@ +# `cxx-ros2-dataflow` 
Example + +This c++ example shows how to publish/subscribe to both ROS2 and Dora. The dataflow consists of a single node that sends random movement commands to the [ROS2 `turtlesim_node`](https://docs.ros.org/en/iron/Tutorials/Beginner-CLI-Tools/Introducing-Turtlesim/Introducing-Turtlesim.html). + +## Setup + +This examples requires a sourced ROS2 installation. + +- To set up ROS2, follow the [ROS2 installation](https://docs.ros.org/en/iron/Installation.html) guide. +- Don't forget to [source the ROS2 setup files](https://docs.ros.org/en/iron/Tutorials/Beginner-CLI-Tools/Configuring-ROS2-Environment.html#source-the-setup-files) +- Follow tasks 1 and 2 of the [ROS2 turtlesim tutorial](https://docs.ros.org/en/iron/Tutorials/Beginner-CLI-Tools/Introducing-Turtlesim/Introducing-Turtlesim.html#id3) + - Install the turtlesim package + - Start the turtlesim node through `ros2 run turtlesim turtlesim_node` + +## Running pub/sub example + +A ROS2 client to publish turtlesim ROS2 messages and a DORA node can subscribe and visualize it. + +From terminal 1 , sourcing the ROS2 installation and start ROS2 turtlesim window +``` +source /opt/ros/galactic/setup.bash +export RMW_IMPLEMENTATION=rmw_fastrtps_cpp +ros2 run turtlesim turtlesim_node +``` + +From terminal 2 from dora folder. Note the source command here is necessary as this allow ROS2 message types to be found and compile dynamically. +``` +export RMW_IMPLEMENTATION=rmw_fastrtps_cpp +source /opt/ros/galactic/setup.bash +cargo run --example cxx-ros2-dataflow --features ros2-examples +``` +And you will see the turtle move a few steps. + +## Running service example +The current service code example is a service client. To test with service server we can test with either ROS2 demo or ros2-client +- if using ROS2 demo the the command line is: +``` +export RMW_IMPLEMENTATION=rmw_fastrtps_cpp +ros2 run demo_nodes_cpp add_two_ints_server +``` + +start DORA service client from another terminal +``` +export RMW_IMPLEMENTATION=rmw_fastrtps_cpp +cargo run --example cxx-ros2-dataflow --features ros2-examples +``` + +- if using ros2-client the command line is: +``` +export RMW_IMPLEMENTATION=rmw_fastrtps_cpp +cargo run --example=ros2_service_server +``` + +then start DORA service client from another terminal +``` +export RMW_IMPLEMENTATION=rmw_fastrtps_cpp +cargo run --example cxx-ros2-dataflow --features ros2-examples +``` + +You can also put export RMW_IMPLEMENTATION=rmw_fastrtps_cpp into .bashrc + diff --git a/examples/c++-ros2-dataflow/dataflow.yml b/examples/c++-ros2-dataflow/dataflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..7639c603677c16c40915d04e6ecc5f3af2735c82 --- /dev/null +++ b/examples/c++-ros2-dataflow/dataflow.yml @@ -0,0 +1,8 @@ +nodes: + - id: cxx-node-rust-api + custom: + source: build/node_rust_api + inputs: + tick: dora/timer/millis/500 + outputs: + - pose diff --git a/examples/c++-ros2-dataflow/node-rust-api/main.cc b/examples/c++-ros2-dataflow/node-rust-api/main.cc new file mode 100644 index 0000000000000000000000000000000000000000..6fae1c34c48f902ca0157e8338fabb71a6de10c5 --- /dev/null +++ b/examples/c++-ros2-dataflow/node-rust-api/main.cc @@ -0,0 +1,108 @@ +#include "../build/dora-node-api.h" +#include "../build/dora-ros2-bindings.h" + +#include +#include +#include + +int main() +{ + std::cout << "HELLO FROM C++" << std::endl; + + auto dora_node = init_dora_node(); + auto merged_events = dora_events_into_combined(std::move(dora_node.events)); + + auto qos = qos_default(); + qos.durability = 
Ros2Durability::Volatile; + qos.liveliness = Ros2Liveliness::Automatic; + qos.reliable = true; + qos.max_blocking_time = 0.1; + + auto ros2_context = init_ros2_context(); + auto node = ros2_context->new_node("/ros2_demo", "turtle_teleop"); + auto vel_topic = node->create_topic_geometry_msgs_Twist("/turtle1", "cmd_vel", qos); + auto vel_publisher = node->create_publisher(vel_topic, qos); + auto pose_topic = node->create_topic_turtlesim_Pose("/turtle1", "pose", qos); + auto pose_subscription = node->create_subscription(pose_topic, qos, merged_events); + + std::random_device dev; + std::default_random_engine gen(dev()); + std::uniform_real_distribution<> dist(0., 1.); + + auto service_qos = qos_default(); + service_qos.reliable = true; + service_qos.max_blocking_time = 0.1; + service_qos.keep_last = 1; + auto add_two_ints = node->create_client_example_interfaces_AddTwoInts("/", "add_two_ints", service_qos, merged_events); + add_two_ints->wait_for_service(node); + + auto received_ticks = 0; + auto responses_received = 0; + + for (int i = 0; i < 1000; i++) + { + auto event = merged_events.next(); + + if (event.is_dora()) + { + auto dora_event = downcast_dora(std::move(event)); + + auto ty = event_type(dora_event); + + if (ty == DoraEventType::AllInputsClosed) + { + break; + } + else if (ty == DoraEventType::Input) + { + auto input = event_as_input(std::move(dora_event)); + received_ticks += 1; + + std::cout << "Received input " << std::string(input.id) << std::endl; + + geometry_msgs::Twist twist = { + .linear = {.x = dist(gen) + 1, .y = 0, .z = 0}, + .angular = {.x = 0, .y = 0, .z = (dist(gen) - 0.5) * 5.0}}; + vel_publisher->publish(twist); + + example_interfaces::AddTwoInts_Request request = {.a = 4, .b = 5}; + add_two_ints->send_request(request); + } + else + { + std::cerr << "Unknown event type " << static_cast(ty) << std::endl; + } + + if (received_ticks > 20) + { + break; + } + } + else if (pose_subscription->matches(event)) + { + auto pose = pose_subscription->downcast(std::move(event)); + std::cout << "Received pose x:" << pose.x << ", y:" << pose.y << std::endl; + } + else if (add_two_ints->matches(event)) + { + auto response = add_two_ints->downcast(std::move(event)); + assert(response.sum == 9); + std::cout << "Received correct sum response from add_two_ints" << std::endl; + responses_received += 1; + } + else + { + std::cout << "received unexpected event" << std::endl; + } + } + + std::cout << "Received " << responses_received << " service responses" << std::endl; + assert(responses_received > 0); + + // try to access a constant for testing + assert((sensor_msgs::const_NavSatStatus_STATUS_NO_FIX() == -1)); + + std::cout << "GOODBYE FROM C++ node (using Rust API)" << std::endl; + + return 0; +} diff --git a/examples/c++-ros2-dataflow/run.rs b/examples/c++-ros2-dataflow/run.rs new file mode 100644 index 0000000000000000000000000000000000000000..918158c21362e4a5fb92b449a94f7772e3d914d6 --- /dev/null +++ b/examples/c++-ros2-dataflow/run.rs @@ -0,0 +1,165 @@ +use dora_tracing::set_up_tracing; +use eyre::{bail, Context}; +use std::{env::consts::EXE_SUFFIX, path::Path}; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("c++-ros2-dataflow-example").wrap_err("failed to set up tracing")?; + + if cfg!(windows) { + tracing::error!( + "The c++ example does not work on Windows currently because of a linker error" + ); + return Ok(()); + } + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + let target = root.join("target"); + 
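+    // The cxxbridge output of `dora-node-api-cxx` (built with the `ros2-bridge`
+    // feature below) ends up under target/cxxbridge/dora-node-api-cxx; the
+    // generated .cc/.h files are copied into ./build so that main.cc can include
+    // dora-node-api.h and dora-ros2-bindings.h.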
std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + tokio::fs::create_dir_all("build").await?; + let build_dir = Path::new("build"); + + build_package("dora-node-api-cxx", &["ros2-bridge"]).await?; + let node_cxxbridge = target.join("cxxbridge").join("dora-node-api-cxx"); + tokio::fs::copy( + node_cxxbridge.join("dora-node-api.cc"), + build_dir.join("dora-node-api.cc"), + ) + .await?; + tokio::fs::copy( + node_cxxbridge.join("dora-node-api.h"), + build_dir.join("dora-node-api.h"), + ) + .await?; + tokio::fs::copy( + node_cxxbridge.join("dora-ros2-bindings.cc"), + build_dir.join("dora-ros2-bindings.cc"), + ) + .await?; + tokio::fs::copy( + node_cxxbridge.join("dora-ros2-bindings.h"), + build_dir.join("dora-ros2-bindings.h"), + ) + .await?; + + build_cxx_node( + root, + &[ + &dunce::canonicalize(Path::new("node-rust-api").join("main.cc"))?, + &dunce::canonicalize(build_dir.join("dora-ros2-bindings.cc"))?, + &dunce::canonicalize(build_dir.join("dora-node-api.cc"))?, + ], + "node_rust_api", + &["-l", "dora_node_api_cxx"], + ) + .await?; + + let dataflow = Path::new("dataflow.yml").to_owned(); + run_dataflow(&dataflow).await?; + + Ok(()) +} + +async fn build_package(package: &str, features: &[&str]) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("build"); + cmd.arg("--package").arg(package); + if !features.is_empty() { + cmd.arg("--features").arg(features.join(",")); + } + if !cmd.status().await?.success() { + bail!("failed to compile {package}"); + }; + Ok(()) +} + +async fn build_cxx_node( + root: &Path, + paths: &[&Path], + out_name: &str, + args: &[&str], +) -> eyre::Result<()> { + let mut clang = tokio::process::Command::new("clang++"); + clang.args(paths); + clang.arg("-std=c++17"); + #[cfg(target_os = "linux")] + { + clang.arg("-l").arg("m"); + clang.arg("-l").arg("rt"); + clang.arg("-l").arg("dl"); + clang.arg("-pthread"); + } + #[cfg(target_os = "windows")] + { + clang.arg("-ladvapi32"); + clang.arg("-luserenv"); + clang.arg("-lkernel32"); + clang.arg("-lws2_32"); + clang.arg("-lbcrypt"); + clang.arg("-lncrypt"); + clang.arg("-lschannel"); + clang.arg("-lntdll"); + clang.arg("-liphlpapi"); + + clang.arg("-lcfgmgr32"); + clang.arg("-lcredui"); + clang.arg("-lcrypt32"); + clang.arg("-lcryptnet"); + clang.arg("-lfwpuclnt"); + clang.arg("-lgdi32"); + clang.arg("-lmsimg32"); + clang.arg("-lmswsock"); + clang.arg("-lole32"); + clang.arg("-lopengl32"); + clang.arg("-lsecur32"); + clang.arg("-lshell32"); + clang.arg("-lsynchronization"); + clang.arg("-luser32"); + clang.arg("-lwinspool"); + + clang.arg("-Wl,-nodefaultlib:libcmt"); + clang.arg("-D_DLL"); + clang.arg("-lmsvcrt"); + } + #[cfg(target_os = "macos")] + { + clang.arg("-framework").arg("CoreServices"); + clang.arg("-framework").arg("Security"); + clang.arg("-l").arg("System"); + clang.arg("-l").arg("resolv"); + clang.arg("-l").arg("pthread"); + clang.arg("-l").arg("c"); + clang.arg("-l").arg("m"); + } + clang.args(args); + clang.arg("-L").arg(root.join("target").join("debug")); + clang + .arg("--output") + .arg(Path::new("../build").join(format!("{out_name}{EXE_SUFFIX}"))); + if let Some(parent) = paths[0].parent() { + clang.current_dir(parent); + } + + if !clang.status().await?.success() { + bail!("failed to compile c++ node"); + }; + Ok(()) +} + +async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = 
tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--run-dataflow") + .arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} diff --git a/examples/c-dataflow/.gitignore b/examples/c-dataflow/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..378eac25d311703f3f2cd456d8036da525cd0366 --- /dev/null +++ b/examples/c-dataflow/.gitignore @@ -0,0 +1 @@ +build diff --git a/examples/c-dataflow/README.md b/examples/c-dataflow/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c2ed2243def159862c5a380e7b19e98fb69d9032 --- /dev/null +++ b/examples/c-dataflow/README.md @@ -0,0 +1,69 @@ +# C Dataflow Example + +This examples shows how to create and connect dora operators and custom nodes in C. + +## Overview + +The [`dataflow.yml`](./dataflow.yml) defines a simple dataflow graph with the following three nodes: + +- [`node.c`](./node.c) is a custom node, i.e., it has its own main function and runs as a separate process. It uses the [`dora-node-api-c` crate](../../apis/c/node/) to interact with the dora dataflow. + - The node has a single input named `timer` that is mapped to a dora-provided periodic timer (`dora/timer/secs/1`). + - Whenever the node receives a timer tick, it sends out a message with ID `tick` and a counter value as data (just a single byte). + - After receiving 10 timer inputs, the node exits. +- The [`operator.c`](./operator.c) file defines a dora _operator_ that is plugged as a shared library into a dora runtime. Instead of defining a `main` function, it implements a template of `dora_*` functions, which are invoked by the dora runtime, e.g. when new input is available. + - The operator takes the `tick` messages created by the `node.c` node as input. For each input value, it checks the ID and then prints the received message to `stdout`. + - It counts the received values and outputs a string of the format _"The current counter value is ..."_. +- The [`sink.c`](./sink.c) file defines a custom node again, which takes the output string of the operator as input. It prints each received input to stdout and exits as soon as the input stream is closed. + +## Compile and Run + +To try it out, you can use the [`run.rs`](./run.rs) binary. It performs all required build steps and then starts the dataflow. Use the following command to run it: `cargo run --example c-dataflow`. + +For a manual build, follow these steps: + +**Build the custom nodes:** + +- Create a `build` folder in this directory (i.e., next to the `node.c` file) +- Compile the `dora-node-api-c` crate into a static library. + - Run `cargo build -p dora-node-api-c --release` + - The resulting staticlib is then available under `../../target/release/libdora-node-api-c.a`. +- Compile the `node.c` (e.g. using `clang`) and link the staticlib + - For example, use the following command: + ``` + clang node.c -ldora_node_api_c -L ../../target/release --output build/c_node + ``` + - The `` depend on the operating system and the libraries that the C node uses. 
The following flags are required for each OS: + - Linux: `-lm -lrt -ldl -pthread` + - macOS: `-framework CoreServices -framework Security -l System -l resolv -l pthread -l c -l m` + - Windows: + ``` + -ladvapi32 -luserenv -lkernel32 -lws2_32 -lbcrypt -lncrypt -lschannel -lntdll -liphlpapi + -lcfgmgr32 -lcredui -lcrypt32 -lcryptnet -lfwpuclnt -lgdi32 -lmsimg32 -lmswsock -lole32 + -lopengl32 -lsecur32 -lshell32 -lsynchronization -luser32 -lwinspool + -Wl,-nodefaultlib:libcmt -D_DLL -lmsvcrt + ``` + Also: On Windows, the output file should have an `.exe` extension: `--output build/c_node.exe` +- Repeat the previous step for the `sink.c` executable + +**Build the operator:** + +- Compile the `operator.c` file into a shared library. + - For example, use the following commands: + ``` + clang -c operator.c -o build/operator.o -fdeclspec -fPIC + clang -shared build/operator.o -o build/liboperator.so + ``` + Omit the `-fPIC` argument on Windows. Replace the `liboperator.so` name with the shared library standard library prefix/extensions used on your OS, e.g. `.dll` on Windows. + +**Build the dora coordinator and runtime:** + +- Build the `dora-coordinator` executable using `cargo build -p dora-coordinator --release` +- Build the `dora-runtime` executable using `cargo build -p dora-runtime --release` + +**Run the dataflow:** + +- Start the `dora-coordinator`, passing the paths to the dataflow file and the `dora-runtime` as arguments: + + ``` + ../../target/release/dora-daemon --run-dataflow dataflow.yml ../../target/release/dora-runtime + ``` diff --git a/examples/c-dataflow/dataflow.yml b/examples/c-dataflow/dataflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..96d5da2f3e634136147258eedf57099888af8a7b --- /dev/null +++ b/examples/c-dataflow/dataflow.yml @@ -0,0 +1,21 @@ +nodes: + - id: c_node + custom: + source: build/c_node + inputs: + timer: dora/timer/millis/50 + outputs: + - message + - id: runtime-node + operators: + - id: c_operator + shared-library: build/operator + inputs: + message: c_node/message + outputs: + - counter + - id: c_sink + custom: + source: build/c_sink + inputs: + counter: runtime-node/c_operator/counter diff --git a/examples/c-dataflow/node.c b/examples/c-dataflow/node.c new file mode 100644 index 0000000000000000000000000000000000000000..9c39766a7e4abe17f2e7cf5b966e1e686a65774d --- /dev/null +++ b/examples/c-dataflow/node.c @@ -0,0 +1,70 @@ +#include +#include +#include +#include "../../apis/c/node/node_api.h" + +// sleep +#ifdef _WIN32 +#include +#else +#include +#endif + +int main() +{ + printf("[c node] Hello World\n"); + + void *dora_context = init_dora_context_from_env(); + if (dora_context == NULL) + { + fprintf(stderr, "failed to init dora context\n"); + return -1; + } + + printf("[c node] dora context initialized\n"); + + for (char i = 0; i < 100; i++) + { + void *event = dora_next_event(dora_context); + if (event == NULL) + { + printf("[c node] ERROR: unexpected end of event\n"); + return -1; + } + + enum DoraEventType ty = read_dora_event_type(event); + + if (ty == DoraEventType_Input) + { + char *data; + size_t data_len; + read_dora_input_data(event, &data, &data_len); + + assert(data_len == 0); + + char out_id[] = "message"; + char out_data[50]; + int out_data_len = sprintf(out_data, "loop iteration %d", i); + + dora_send_output(dora_context, out_id, strlen(out_id), out_data, out_data_len); + } + else if (ty == DoraEventType_Stop) + { + printf("[c node] received stop event\n"); + } + else + { + printf("[c node] received unexpected 
event: %d\n", ty); + } + + free_dora_event(event); + } + + printf("[c node] received 10 events\n"); + + free_dora_context(dora_context); + + printf("[c node] finished successfully\n"); + + return 0; +} diff --git a/examples/c-dataflow/operator.c b/examples/c-dataflow/operator.c new file mode 100644 index 0000000000000000000000000000000000000000..cc5ee83bd8330ecb87485b0a64d2ffd65891c7f1 --- /dev/null +++ b/examples/c-dataflow/operator.c @@ -0,0 +1,77 @@ +#include "../../apis/c/operator/operator_api.h" +#include +#include +#include +#include + +DoraInitResult_t dora_init_operator(void) +{ + void *context = malloc(1); + char *context_char = (char *)context; + *context_char = 0; + + DoraInitResult_t result = {.operator_context = context}; + return result; +} + +DoraResult_t dora_drop_operator(void *operator_context) +{ + free(operator_context); + + DoraResult_t result = {}; + return result; +} + +OnEventResult_t dora_on_event( + RawEvent_t *event, + const SendOutput_t *send_output, + void *operator_context) +{ + OnEventResult_t result = {.status = DORA_STATUS_CONTINUE}; + + char *counter = (char *)operator_context; + + if (event->input != NULL) + { + // input event + Input_t *input = event->input; + + char *id = dora_read_input_id(input); + + if (strcmp(id, "message") == 0) + { + printf("message event\n"); + + Vec_uint8_t data = dora_read_data(input); + assert(data.ptr != NULL); + + *counter += 1; + printf("C operator received message `%.*s`, counter: %i\n", (int)data.len, data.ptr, *counter); + + char *out_id = "counter"; + char *out_id_heap = strdup(out_id); + + int data_alloc_size = 100; + char *out_data = (char *)malloc(data_alloc_size); + int count = snprintf(out_data, data_alloc_size, "The current counter value is %d", *counter); + assert(count >= 0 && count < 100); + + DoraResult_t res = dora_send_operator_output(send_output, out_id_heap, (uint8_t *)out_data, strlen(out_data)); + result.result = res; + + dora_free_data(data); + } + else + { + printf("C operator received unexpected input %s, context: %i\n", id, *counter); + } + + dora_free_input_id(id); + } + if (event->stop) + { + printf("C operator received stop event\n"); + } + + return result; +} diff --git a/examples/c-dataflow/run.rs b/examples/c-dataflow/run.rs new file mode 100644 index 0000000000000000000000000000000000000000..ad484edfd3d0519e0616d89ce76c637a2f9f9f6c --- /dev/null +++ b/examples/c-dataflow/run.rs @@ -0,0 +1,186 @@ +use dora_tracing::set_up_tracing; +use eyre::{bail, Context}; +use std::{ + env::consts::{DLL_PREFIX, DLL_SUFFIX, EXE_SUFFIX}, + path::Path, +}; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("c-dataflow-runner").wrap_err("failed to set up tracing")?; + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + tokio::fs::create_dir_all("build").await?; + + build_package("dora-node-api-c").await?; + build_c_node(root, "node.c", "c_node").await?; + build_c_node(root, "sink.c", "c_sink").await?; + + build_package("dora-operator-api-c").await?; + build_c_operator(root).await?; + + let dataflow = Path::new("dataflow.yml").to_owned(); + run_dataflow(&dataflow).await?; + + Ok(()) +} + +async fn build_package(package: &str) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("build"); + cmd.arg("--package").arg(package); + if !cmd.status().await?.success() { + bail!("failed to build {package}"); + 
}; + Ok(()) +} + +async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--run-dataflow") + .arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} + +async fn build_c_node(root: &Path, name: &str, out_name: &str) -> eyre::Result<()> { + let mut clang = tokio::process::Command::new("clang"); + clang.arg(name); + clang.arg("-l").arg("dora_node_api_c"); + #[cfg(target_os = "linux")] + { + clang.arg("-l").arg("m"); + clang.arg("-l").arg("rt"); + clang.arg("-l").arg("dl"); + clang.arg("-pthread"); + } + #[cfg(target_os = "windows")] + { + clang.arg("-ladvapi32"); + clang.arg("-luserenv"); + clang.arg("-lkernel32"); + clang.arg("-lws2_32"); + clang.arg("-lbcrypt"); + clang.arg("-lncrypt"); + clang.arg("-lschannel"); + clang.arg("-lntdll"); + clang.arg("-liphlpapi"); + + clang.arg("-lcfgmgr32"); + clang.arg("-lcredui"); + clang.arg("-lcrypt32"); + clang.arg("-lcryptnet"); + clang.arg("-lfwpuclnt"); + clang.arg("-lgdi32"); + clang.arg("-lmsimg32"); + clang.arg("-lmswsock"); + clang.arg("-lole32"); + clang.arg("-loleaut32"); + clang.arg("-lopengl32"); + clang.arg("-lsecur32"); + clang.arg("-lshell32"); + clang.arg("-lsynchronization"); + clang.arg("-luser32"); + clang.arg("-lwinspool"); + + clang.arg("-Wl,-nodefaultlib:libcmt"); + clang.arg("-D_DLL"); + clang.arg("-lmsvcrt"); + } + #[cfg(target_os = "macos")] + { + clang.arg("-framework").arg("CoreServices"); + clang.arg("-framework").arg("Security"); + clang.arg("-l").arg("System"); + clang.arg("-l").arg("resolv"); + clang.arg("-l").arg("pthread"); + clang.arg("-l").arg("c"); + clang.arg("-l").arg("m"); + } + clang.arg("-L").arg(root.join("target").join("debug")); + clang + .arg("--output") + .arg(Path::new("build").join(format!("{out_name}{EXE_SUFFIX}"))); + if !clang.status().await?.success() { + bail!("failed to compile c node"); + }; + Ok(()) +} + +async fn build_c_operator(root: &Path) -> eyre::Result<()> { + let mut compile = tokio::process::Command::new("clang"); + compile.arg("-c").arg("operator.c"); + compile.arg("-o").arg("build/operator.o"); + compile.arg("-fdeclspec"); + #[cfg(unix)] + compile.arg("-fPIC"); + if !compile.status().await?.success() { + bail!("failed to compile c operator"); + }; + + let mut link = tokio::process::Command::new("clang"); + link.arg("-shared").arg("build/operator.o"); + link.arg("-L").arg(root.join("target").join("debug")); + link.arg("-l").arg("dora_operator_api_c"); + #[cfg(target_os = "windows")] + { + link.arg("-ladvapi32"); + link.arg("-luserenv"); + link.arg("-lkernel32"); + link.arg("-lws2_32"); + link.arg("-lbcrypt"); + link.arg("-lncrypt"); + link.arg("-lschannel"); + link.arg("-lntdll"); + link.arg("-liphlpapi"); + + link.arg("-lcfgmgr32"); + link.arg("-lcredui"); + link.arg("-lcrypt32"); + link.arg("-lcryptnet"); + link.arg("-lfwpuclnt"); + link.arg("-lgdi32"); + link.arg("-lmsimg32"); + link.arg("-lmswsock"); + link.arg("-lole32"); + link.arg("-loleaut32"); + link.arg("-lopengl32"); + link.arg("-lsecur32"); + link.arg("-lshell32"); + link.arg("-lsynchronization"); + link.arg("-luser32"); + link.arg("-lwinspool"); + + link.arg("-Wl,-nodefaultlib:libcmt"); + link.arg("-D_DLL"); + link.arg("-lmsvcrt"); + } + #[cfg(target_os = "macos")] + { + link.arg("-framework").arg("CoreServices"); + link.arg("-framework").arg("Security"); + 
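+        // The remaining system libraries (System, resolv, pthread, c, m) are also
+        // needed when linking the statically built `dora_operator_api_c` library on
+        // macOS, matching the macOS flags listed in this example's README.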
link.arg("-l").arg("System"); + link.arg("-l").arg("resolv"); + link.arg("-l").arg("pthread"); + link.arg("-l").arg("c"); + link.arg("-l").arg("m"); + } + link.arg("-o") + .arg(Path::new("build").join(format!("{DLL_PREFIX}operator{DLL_SUFFIX}"))); + if !link.status().await?.success() { + bail!("failed to link c operator"); + }; + + Ok(()) +} diff --git a/examples/c-dataflow/sink.c b/examples/c-dataflow/sink.c new file mode 100644 index 0000000000000000000000000000000000000000..12461c67768bc1f1413c6af501029eef0e72b02f --- /dev/null +++ b/examples/c-dataflow/sink.c @@ -0,0 +1,64 @@ +#include +#include +#include +#include "../../apis/c/node/node_api.h" + +int main() +{ + printf("[c sink] Hello World\n"); + + void *dora_context = init_dora_context_from_env(); + if (dora_context == NULL) + { + fprintf(stderr, "failed to init dora context\n"); + return -1; + } + printf("[c sink] dora context initialized\n"); + + while (1) + { + void *event = dora_next_event(dora_context); + if (event == NULL) + { + printf("[c sink] end of event\n"); + break; + } + + enum DoraEventType ty = read_dora_event_type(event); + + if (ty == DoraEventType_Input) + { + char *id; + size_t id_len; + read_dora_input_id(event, &id, &id_len); + + char *data; + size_t data_len; + read_dora_input_data(event, &data, &data_len); + + printf("[c sink] received input `"); + fwrite(id, id_len, 1, stdout); + printf("` with data: %.*s\n", (int)data_len, data); + } + else if (ty == DoraEventType_InputClosed) + { + printf("[c sink] received InputClosed event\n"); + } + else if (ty == DoraEventType_Stop) + { + printf("[c sink] received stop event\n"); + } + else + { + printf("[c sink] received unexpected event: %d\n", ty); + } + + free_dora_event(event); + } + + free_dora_context(dora_context); + + printf("[c sink] finished successfully\n"); + + return 0; +} diff --git a/examples/cmake-dataflow/.gitignore b/examples/cmake-dataflow/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..c928ceb0500c7cef19c1b71987387e592bfcc443 --- /dev/null +++ b/examples/cmake-dataflow/.gitignore @@ -0,0 +1,6 @@ +*.o + +# cmake artifact +build +lib +bin \ No newline at end of file diff --git a/examples/cmake-dataflow/CMakeLists.txt b/examples/cmake-dataflow/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4a5f8d8d753c66dc229589986388f5a97634c2a --- /dev/null +++ b/examples/cmake-dataflow/CMakeLists.txt @@ -0,0 +1,32 @@ +cmake_minimum_required(VERSION 3.21) +project(cmake-dataflow LANGUAGES C CXX) + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_FLAGS "-fPIC") + +include(DoraTargets.cmake) + +link_directories(${dora_link_dirs}) + +add_executable(node_c_api node-c-api/main.cc) +add_dependencies(node_c_api Dora_c) +target_include_directories(node_c_api PRIVATE ${dora_c_include_dir}) +target_link_libraries(node_c_api dora_node_api_c) + +add_executable(node_rust_api node-rust-api/main.cc ${node_bridge}) +add_dependencies(node_rust_api Dora_cxx) +target_include_directories(node_rust_api PRIVATE ${dora_cxx_include_dir}) +target_link_libraries(node_rust_api dora_node_api_cxx) + +add_library(operator_c_api SHARED operator-c-api/operator.cc) +add_dependencies(operator_c_api Dora_c) +target_include_directories(operator_c_api PRIVATE ${dora_c_include_dir}) +target_link_libraries(operator_c_api dora_operator_api_c) + +add_library(operator_rust_api SHARED operator-rust-api/operator.cc ${operator_bridge}) +add_dependencies(operator_rust_api Dora_cxx) +target_include_directories(operator_rust_api PRIVATE 
${dora_cxx_include_dir} ${dora_c_include_dir} ${CMAKE_CURRENT_SOURCE_DIR}/operator-rust-api) +target_link_libraries(operator_rust_api dora_operator_api_cxx) + +install(TARGETS node_c_api node_rust_api DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/bin) +install(TARGETS operator_c_api operator_rust_api DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/lib) diff --git a/examples/cmake-dataflow/DoraTargets.cmake b/examples/cmake-dataflow/DoraTargets.cmake new file mode 100644 index 0000000000000000000000000000000000000000..d33687c4b3c9281912acc799ef64b6f4f5f15dc1 --- /dev/null +++ b/examples/cmake-dataflow/DoraTargets.cmake @@ -0,0 +1,109 @@ +set(DORA_ROOT_DIR "" CACHE FILEPATH "Path to the root of dora") + +set(dora_c_include_dir "${CMAKE_CURRENT_BINARY_DIR}/include/c") + +set(dora_cxx_include_dir "${CMAKE_CURRENT_BINARY_DIR}/include/cxx") +set(node_bridge "${CMAKE_CURRENT_BINARY_DIR}/node_bridge.cc") +set(operator_bridge "${CMAKE_CURRENT_BINARY_DIR}/operator_bridge.cc") + +if(DORA_ROOT_DIR) + include(ExternalProject) + ExternalProject_Add(Dora + SOURCE_DIR ${DORA_ROOT_DIR} + BUILD_IN_SOURCE True + CONFIGURE_COMMAND "" + BUILD_COMMAND + cargo build + --package dora-node-api-c + && + cargo build + --package dora-operator-api-c + && + cargo build + --package dora-node-api-cxx + && + cargo build + --package dora-operator-api-cxx + INSTALL_COMMAND "" + ) + + add_custom_command(OUTPUT ${node_bridge} ${dora_cxx_include_dir} ${operator_bridge} ${dora_c_include_dir} + WORKING_DIRECTORY ${DORA_ROOT_DIR} + DEPENDS Dora + COMMAND + mkdir ${dora_cxx_include_dir} -p + && + mkdir ${CMAKE_CURRENT_BINARY_DIR}/include/c -p + && + cp target/cxxbridge/dora-node-api-cxx/src/lib.rs.cc ${node_bridge} + && + cp target/cxxbridge/dora-node-api-cxx/src/lib.rs.h ${dora_cxx_include_dir}/dora-node-api.h + && + cp target/cxxbridge/dora-operator-api-cxx/src/lib.rs.cc ${operator_bridge} + && + cp target/cxxbridge/dora-operator-api-cxx/src/lib.rs.h ${dora_cxx_include_dir}/dora-operator-api.h + && + cp apis/c/node ${CMAKE_CURRENT_BINARY_DIR}/include/c -r + && + cp apis/c/operator ${CMAKE_CURRENT_BINARY_DIR}/include/c -r + + ) + + add_custom_target(Dora_c DEPENDS ${dora_c_include_dir}) + add_custom_target(Dora_cxx DEPENDS ${node_bridge} ${operator_bridge} ${dora_cxx_include_dir}) + set(dora_link_dirs ${DORA_ROOT_DIR}/target/debug) +else() + include(ExternalProject) + ExternalProject_Add(Dora + PREFIX ${CMAKE_CURRENT_BINARY_DIR}/dora + GIT_REPOSITORY https://github.com/dora-rs/dora.git + GIT_TAG main + BUILD_IN_SOURCE True + CONFIGURE_COMMAND "" + BUILD_COMMAND + cargo build + --package dora-node-api-c + --target-dir ${CMAKE_CURRENT_BINARY_DIR}/dora/src/Dora/target + && + cargo build + --package dora-operator-api-c + --target-dir ${CMAKE_CURRENT_BINARY_DIR}/dora/src/Dora/target + && + cargo build + --package dora-node-api-cxx + --target-dir ${CMAKE_CURRENT_BINARY_DIR}/dora/src/Dora/target + && + cargo build + --package dora-operator-api-cxx + --target-dir ${CMAKE_CURRENT_BINARY_DIR}/dora/src/Dora/target + INSTALL_COMMAND "" + ) + + add_custom_command(OUTPUT ${node_bridge} ${dora_cxx_include_dir} ${operator_bridge} ${dora_c_include_dir} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/dora/src/Dora/target + DEPENDS Dora + COMMAND + mkdir ${CMAKE_CURRENT_BINARY_DIR}/include/c -p + && + mkdir ${dora_cxx_include_dir} -p + && + cp cxxbridge/dora-node-api-cxx/src/lib.rs.cc ${node_bridge} + && + cp cxxbridge/dora-node-api-cxx/src/lib.rs.h ${dora_cxx_include_dir}/dora-node-api.h + && + cp cxxbridge/dora-operator-api-cxx/src/lib.rs.cc ${operator_bridge} 
+ && + cp cxxbridge/dora-operator-api-cxx/src/lib.rs.h ${dora_cxx_include_dir}/dora-operator-api.h + && + cp ../apis/c/node ${CMAKE_CURRENT_BINARY_DIR}/include/c -r + && + cp ../apis/c/operator ${CMAKE_CURRENT_BINARY_DIR}/include/c -r + ) + + set(dora_link_dirs ${CMAKE_CURRENT_BINARY_DIR}/dora/src/Dora/target/debug) + + add_custom_target(Dora_c DEPENDS ${dora_c_include_dir}) + add_custom_target(Dora_cxx DEPENDS ${node_bridge} ${operator_bridge} ${dora_cxx_include_dir}) +endif() + + diff --git a/examples/cmake-dataflow/README.md b/examples/cmake-dataflow/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4fd187cf1ae179f3ba1c141fe1bf55215dffa01d --- /dev/null +++ b/examples/cmake-dataflow/README.md @@ -0,0 +1,22 @@ +# Dora CMake Dataflow Example + +This example shows how to create dora operators and custom nodes in CMake build system. + +See also [c++-example](https://github.com/dora-rs/dora/blob/main/examples/c%2B%2B-dataflow/README.md) for the implementation details of operator and node. + +## Compile and Run + +To try it out, you can use the [`run.rs`](./run.rs) binary. It performs all required build steps and then starts the dataflow. Use the following command to run it: `cargo run --example cmake-dataflow`. + +## Out-of-tree compile + +This example also can be ran in a separate root directory. +``` +cd +mkdir build +cd build && cmake .. +make install +cd .. +dora up +dora start dataflow.yml +``` diff --git a/examples/cmake-dataflow/dataflow.yml b/examples/cmake-dataflow/dataflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..d6eac623c4a68da76d84a8b7b2320134ca4f2d4d --- /dev/null +++ b/examples/cmake-dataflow/dataflow.yml @@ -0,0 +1,33 @@ +nodes: + - id: cxx-node-rust-api + custom: + source: bin/node_rust_api + inputs: + tick: dora/timer/millis/300 + outputs: + - counter + - id: cxx-node-c-api + custom: + source: bin/node_c_api + inputs: + tick: dora/timer/millis/300 + outputs: + - counter + + - id: runtime-node-1 + operators: + - id: operator-rust-api + shared-library: lib/operator_rust_api + inputs: + counter_1: cxx-node-c-api/counter + counter_2: cxx-node-rust-api/counter + outputs: + - status + - id: runtime-node-2 + operators: + - id: operator-c-api + shared-library: lib/operator_c_api + inputs: + op_status: runtime-node-1/operator-rust-api/status + outputs: + - half-status diff --git a/examples/cmake-dataflow/node-c-api/main.cc b/examples/cmake-dataflow/node-c-api/main.cc new file mode 100644 index 0000000000000000000000000000000000000000..83afe19f8a0db55ad744f6e731cd9f2d5caa0fe6 --- /dev/null +++ b/examples/cmake-dataflow/node-c-api/main.cc @@ -0,0 +1,85 @@ +extern "C" +{ +#include "node/node_api.h" +} + +#include +#include + +int run(void *dora_context) +{ + unsigned char counter = 0; + + for (int i = 0; i < 20; i++) + { + void *event = dora_next_event(dora_context); + if (event == NULL) + { + printf("[c node] ERROR: unexpected end of event\n"); + return -1; + } + + enum DoraEventType ty = read_dora_event_type(event); + + if (ty == DoraEventType_Input) + { + counter += 1; + + char *id_ptr; + size_t id_len; + read_dora_input_id(event, &id_ptr, &id_len); + std::string id(id_ptr, id_len); + + char *data_ptr; + size_t data_len; + read_dora_input_data(event, &data_ptr, &data_len); + std::vector data; + for (size_t i = 0; i < data_len; i++) + { + data.push_back(*(data_ptr + i)); + } + + std::cout + << "Received input " + << " (counter: " << (unsigned int)counter << ") data: ["; + for (unsigned char &v : data) + { + std::cout << 
(unsigned int)v << ", "; + } + std::cout << "]" << std::endl; + + std::vector out_vec{counter}; + std::string out_id = "counter"; + int result = dora_send_output(dora_context, &out_id[0], out_id.length(), (char *)&counter, 1); + if (result != 0) + { + std::cerr << "failed to send output" << std::endl; + return 1; + } + } + else if (ty == DoraEventType_Stop) + { + printf("[c node] received stop event\n"); + } + else + { + printf("[c node] received unexpected event: %d\n", ty); + } + + free_dora_event(event); + } + return 0; +} + +int main() +{ + std::cout << "HELLO FROM C++ (using C API)" << std::endl; + + auto dora_context = init_dora_context_from_env(); + auto ret = run(dora_context); + free_dora_context(dora_context); + + std::cout << "GOODBYE FROM C++ node (using C API)" << std::endl; + + return ret; +} diff --git a/examples/cmake-dataflow/node-rust-api/main.cc b/examples/cmake-dataflow/node-rust-api/main.cc new file mode 100644 index 0000000000000000000000000000000000000000..efc8f22814b6ebfd9e11a85ff9ad303e2d8c45d6 --- /dev/null +++ b/examples/cmake-dataflow/node-rust-api/main.cc @@ -0,0 +1,50 @@ +#include "dora-node-api.h" + +#include +#include + +int main() +{ + std::cout << "HELLO FROM C++" << std::endl; + unsigned char counter = 0; + + auto dora_node = init_dora_node(); + + for (int i = 0; i < 20; i++) + { + + auto event = next_event(dora_node.events); + auto ty = event_type(event); + + if (ty == DoraEventType::AllInputsClosed) + { + break; + } + else if (ty == DoraEventType::Input) + { + auto input = event_as_input(std::move(event)); + + counter += 1; + + std::cout << "Received input " << std::string(input.id) << " (counter: " << (unsigned int)counter << ")" << std::endl; + + std::vector out_vec{counter}; + rust::Slice out_slice{out_vec.data(), out_vec.size()}; + auto result = send_output(dora_node.send_output, "counter", out_slice); + auto error = std::string(result.error); + if (!error.empty()) + { + std::cerr << "Error: " << error << std::endl; + return -1; + } + } + else + { + std::cerr << "Unknown event type " << static_cast(ty) << std::endl; + } + } + + std::cout << "GOODBYE FROM C++ node (using Rust API)" << std::endl; + + return 0; +} diff --git a/examples/cmake-dataflow/operator-c-api/operator.cc b/examples/cmake-dataflow/operator-c-api/operator.cc new file mode 100644 index 0000000000000000000000000000000000000000..f15492dda6082a5f747d81207afe95a60c254ecd --- /dev/null +++ b/examples/cmake-dataflow/operator-c-api/operator.cc @@ -0,0 +1,79 @@ +extern "C" +{ +#include "../../../apis/c/operator/operator_api.h" +} + +#include +#include +#include +#include +#include + +class Operator +{ +public: + Operator(); +}; + +Operator::Operator() {} + +extern "C" DoraInitResult_t dora_init_operator() +{ + Operator *op = std::make_unique().release(); + + DoraInitResult_t result = {.operator_context = (void *)op}; + return result; +} + +extern "C" DoraResult_t dora_drop_operator(void *operator_context) +{ + delete (Operator *)operator_context; + return {}; +} + +extern "C" OnEventResult_t dora_on_event( + RawEvent_t *event, + const SendOutput_t *send_output, + void *operator_context) +{ + if (event->input != NULL) + { + // input event + Input_t *input = event->input; + char *id = dora_read_input_id(input); + + Vec_uint8_t data = dora_read_data(input); + assert(data.ptr != NULL); + + std::cout + << "C++ Operator (C-API) received input `" << id << "` with data: ["; + for (int i = 0; i < data.len; i++) + { + std::cout << (unsigned int)data.ptr[i] << ", "; + } + std::cout << "]" << 
std::endl; + + const char *out_id = "half-status"; + char *out_id_heap = strdup(out_id); + + size_t out_data_len = 1; + uint8_t *out_data_heap = (uint8_t *)malloc(out_data_len); + *out_data_heap = *data.ptr / 2; + + DoraResult_t send_result = dora_send_operator_output(send_output, out_id_heap, out_data_heap, out_data_len); + + OnEventResult_t result = {.result = send_result, .status = DORA_STATUS_CONTINUE}; + + dora_free_data(data); + dora_free_input_id(id); + + return result; + } + if (event->stop) + { + printf("C operator received stop event\n"); + } + + OnEventResult_t result = {.status = DORA_STATUS_CONTINUE}; + return result; +} diff --git a/examples/cmake-dataflow/operator-rust-api/operator.cc b/examples/cmake-dataflow/operator-rust-api/operator.cc new file mode 100644 index 0000000000000000000000000000000000000000..d158568ee03f5ed72ec95628a5bd8e06319d5535 --- /dev/null +++ b/examples/cmake-dataflow/operator-rust-api/operator.cc @@ -0,0 +1,23 @@ +#include "operator.h" +#include +#include +#include "dora-operator-api.h" + +Operator::Operator() {} + +std::unique_ptr new_operator() +{ + return std::make_unique(); +} + +DoraOnInputResult on_input(Operator &op, rust::Str id, rust::Slice data, OutputSender &output_sender) +{ + op.counter += 1; + std::cout << "Rust API operator received input `" << id.data() << "` with data `" << (unsigned int)data[0] << "` (internal counter: " << (unsigned int)op.counter << ")" << std::endl; + + std::vector out_vec{op.counter}; + rust::Slice out_slice{out_vec.data(), out_vec.size()}; + auto send_result = send_output(output_sender, rust::Str("status"), out_slice); + DoraOnInputResult result = {send_result.error, false}; + return result; +} diff --git a/examples/cmake-dataflow/operator-rust-api/operator.h b/examples/cmake-dataflow/operator-rust-api/operator.h new file mode 100644 index 0000000000000000000000000000000000000000..79fad9931d87089a5b662fc997e48bc13a342ef7 --- /dev/null +++ b/examples/cmake-dataflow/operator-rust-api/operator.h @@ -0,0 +1,16 @@ +#pragma once +#include +#include "operator/operator_api.h" + +class Operator +{ +public: + Operator(); + unsigned char counter; +}; + +#include "dora-operator-api.h" + +std::unique_ptr new_operator(); + +DoraOnInputResult on_input(Operator &op, rust::Str id, rust::Slice data, OutputSender &output_sender); diff --git a/examples/cmake-dataflow/run.rs b/examples/cmake-dataflow/run.rs new file mode 100644 index 0000000000000000000000000000000000000000..30e3c9d117fb38d2910a8aa6d78e65cc5fb329f8 --- /dev/null +++ b/examples/cmake-dataflow/run.rs @@ -0,0 +1,72 @@ +use dora_tracing::set_up_tracing; +use eyre::{bail, Context}; +use std::path::Path; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("cmake-dataflow-runner").wrap_err("failed to set up tracing")?; + + if cfg!(windows) { + tracing::error!( + "The c++ example does not work on Windows currently because of a linker error" + ); + return Ok(()); + } + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + tokio::fs::create_dir_all("build").await?; + let mut cmd = tokio::process::Command::new("cmake"); + cmd.arg(format!("-DDORA_ROOT_DIR={}", root.display())); + cmd.arg("-B").arg("build"); + cmd.arg("."); + if !cmd.status().await?.success() { + bail!("failed to generating make file"); + } + + let mut cmd = tokio::process::Command::new("cmake"); + cmd.arg("--build").arg("build"); + if !cmd.status().await?.success() { + 
bail!("failed to build a cmake-generated project binary tree"); + } + + let mut cmd = tokio::process::Command::new("cmake"); + cmd.arg("--install").arg("build"); + if !cmd.status().await?.success() { + bail!("failed to build a cmake-generated project binary tree"); + } + + let dataflow = Path::new("dataflow.yml").to_owned(); + build_package("dora-runtime").await?; + run_dataflow(&dataflow).await?; + + Ok(()) +} + +async fn build_package(package: &str) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("build"); + cmd.arg("--package").arg(package); + if !cmd.status().await?.success() { + bail!("failed to build {package}"); + } + Ok(()) +} + +async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--run-dataflow") + .arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} diff --git a/examples/hf-operator/__pycache__/bunny_demo.cpython-310.pyc b/examples/hf-operator/__pycache__/bunny_demo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c00f9d5a03b9b011e8b55e04cba925d3df4e7e23 Binary files /dev/null and b/examples/hf-operator/__pycache__/bunny_demo.cpython-310.pyc differ diff --git a/examples/hf-operator/__pycache__/bunny_demo.cpython-38.pyc b/examples/hf-operator/__pycache__/bunny_demo.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b2b8a6e4ce18a15d8c47f3f2790cb081816a402 Binary files /dev/null and b/examples/hf-operator/__pycache__/bunny_demo.cpython-38.pyc differ diff --git a/examples/hf-operator/__pycache__/constants.cpython-310.pyc b/examples/hf-operator/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8541f1c284d050515bd4377b153898eec773a77b Binary files /dev/null and b/examples/hf-operator/__pycache__/constants.cpython-310.pyc differ diff --git a/examples/hf-operator/__pycache__/constants.cpython-38.pyc b/examples/hf-operator/__pycache__/constants.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60fb0411ddc5bf990eee8359939c3b806a5a8e26 Binary files /dev/null and b/examples/hf-operator/__pycache__/constants.cpython-38.pyc differ diff --git a/examples/hf-operator/__pycache__/idefics2_op_demo.cpython-310.pyc b/examples/hf-operator/__pycache__/idefics2_op_demo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09b3cf32b72f48b07ee8145112b106ca602a80c8 Binary files /dev/null and b/examples/hf-operator/__pycache__/idefics2_op_demo.cpython-310.pyc differ diff --git a/examples/hf-operator/__pycache__/idefics2_op_demo.cpython-38.pyc b/examples/hf-operator/__pycache__/idefics2_op_demo.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3ad2b03715c377c7b5850af7ec28b2d928fda1c Binary files /dev/null and b/examples/hf-operator/__pycache__/idefics2_op_demo.cpython-38.pyc differ diff --git a/examples/hf-operator/__pycache__/llm_op.cpython-310.pyc b/examples/hf-operator/__pycache__/llm_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b4a693075f414bbb85718e349e3750936fc56b2 Binary files /dev/null and b/examples/hf-operator/__pycache__/llm_op.cpython-310.pyc differ diff --git 
a/examples/hf-operator/__pycache__/llm_op.cpython-38.pyc b/examples/hf-operator/__pycache__/llm_op.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f2c72dbac67b21281db8320b03c0f109f6f5dfb Binary files /dev/null and b/examples/hf-operator/__pycache__/llm_op.cpython-38.pyc differ diff --git a/examples/hf-operator/__pycache__/object_detection.cpython-310.pyc b/examples/hf-operator/__pycache__/object_detection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0504902a221fdd5f89686d078d141fe63e5d407 Binary files /dev/null and b/examples/hf-operator/__pycache__/object_detection.cpython-310.pyc differ diff --git a/examples/hf-operator/__pycache__/parler_op.cpython-310.pyc b/examples/hf-operator/__pycache__/parler_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a0079cb19f5cec314f5cda4db82c5d5405bfd95 Binary files /dev/null and b/examples/hf-operator/__pycache__/parler_op.cpython-310.pyc differ diff --git a/examples/hf-operator/__pycache__/parler_op.cpython-38.pyc b/examples/hf-operator/__pycache__/parler_op.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..580318846526984e89e21ccf849ec74e1d9a4df3 Binary files /dev/null and b/examples/hf-operator/__pycache__/parler_op.cpython-38.pyc differ diff --git a/examples/hf-operator/__pycache__/planning_op.cpython-310.pyc b/examples/hf-operator/__pycache__/planning_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..638e3c738899530c64a89a79b7b65050fdbcadff Binary files /dev/null and b/examples/hf-operator/__pycache__/planning_op.cpython-310.pyc differ diff --git a/examples/hf-operator/__pycache__/planning_op.cpython-38.pyc b/examples/hf-operator/__pycache__/planning_op.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47036aadba69986e78aaaa669650a818be46b2b7 Binary files /dev/null and b/examples/hf-operator/__pycache__/planning_op.cpython-38.pyc differ diff --git a/examples/hf-operator/__pycache__/policy.cpython-310.pyc b/examples/hf-operator/__pycache__/policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1fdd72cc84ab25b6d27b4f9eafea746ef3e7226 Binary files /dev/null and b/examples/hf-operator/__pycache__/policy.cpython-310.pyc differ diff --git a/examples/hf-operator/__pycache__/policy.cpython-38.pyc b/examples/hf-operator/__pycache__/policy.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7396a73724b7574880521a791710ca50824e2093 Binary files /dev/null and b/examples/hf-operator/__pycache__/policy.cpython-38.pyc differ diff --git a/examples/hf-operator/__pycache__/reload.cpython-310.pyc b/examples/hf-operator/__pycache__/reload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5df59902e9586c6474ac8a2badf8feeea5e069da Binary files /dev/null and b/examples/hf-operator/__pycache__/reload.cpython-310.pyc differ diff --git a/examples/hf-operator/__pycache__/reload.cpython-38.pyc b/examples/hf-operator/__pycache__/reload.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8b5223058aa381c18909d4352344fa7cbc374c6 Binary files /dev/null and b/examples/hf-operator/__pycache__/reload.cpython-38.pyc differ diff --git a/examples/hf-operator/__pycache__/utils.cpython-310.pyc b/examples/hf-operator/__pycache__/utils.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2c87a15768c99da893d00f3ea8036daad1741c35 Binary files /dev/null and b/examples/hf-operator/__pycache__/utils.cpython-310.pyc differ diff --git a/examples/hf-operator/__pycache__/utils.cpython-38.pyc b/examples/hf-operator/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efa9b20758a4f28e863f3776ff93289032d2da7b Binary files /dev/null and b/examples/hf-operator/__pycache__/utils.cpython-38.pyc differ diff --git a/examples/hf-operator/bunny_demo.py b/examples/hf-operator/bunny_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..7c5c0f2d3131a019317d11c06457ae2ad609a627 --- /dev/null +++ b/examples/hf-operator/bunny_demo.py @@ -0,0 +1,117 @@ + +import sys +print("Python Version:", sys.version) +from dora import DoraStatus +import pyarrow as pa +from transformers import AutoProcessor, AutoModelForCausalLM,AutoTokenizer +from PIL import Image +import torch +import gc + + +CAMERA_WIDTH = 1280 +CAMERA_HEIGHT = 720 +#修改 +# elements = 1555200 +# # 一个可能的尺寸计算 +# CAMERA_HEIGHT = 720 +# CAMERA_WIDTH = elements // (3 * CAMERA_HEIGHT) +# print(CAMERA_WIDTH) +# PROCESSOR = AutoProcessor.from_pretrained("/home/peiji/Bunny-v1_0-2B-zh") +tokenizer = AutoTokenizer.from_pretrained( + '/mnt/c/Bunny-v1_0-2B-zh/', + trust_remote_code=True) +BAD_WORDS_IDS =tokenizer( + ["", ""], add_special_tokens=False +).input_ids +EOS_WORDS_IDS = tokenizer( + "", add_special_tokens=False +).input_ids + [tokenizer.eos_token_id] + +# set device +device = 'cuda' # or cpu +torch.set_default_device(device) + +# create model +model = AutoModelForCausalLM.from_pretrained( + '/mnt/c/Bunny-v1_0-2B-zh/', + torch_dtype=torch.float16, # float32 for cpu + device_map='auto', + trust_remote_code=True + ) + +print("load bunny model finish") + + +def ask_vlm(image, instruction): + global model + prompts = [ + "User:", + image, + f"{instruction}.\n", + "Assistant:", + ] + inputs = {k: torch.tensor(v).to("cuda") for k, v in PROCESSOR(prompts).items()} + + generated_ids = model.generate( + **inputs, + bad_words_ids=BAD_WORDS_IDS, + max_new_tokens=25, + repetition_penalty=1.2, + ) + generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True) + + gc.collect() + torch.cuda.empty_cache() + return generated_texts[0].split("\nAssistant: ")[1] + + +import time + + +class Operator: + def __init__(self): + self.image = None + self.text = None + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + if dora_event["id"] == "image": + self.image = ( + dora_event["value"] + .to_numpy() + .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)) + ) + elif dora_event["id"] == "text": + self.text = dora_event["value"][0].as_py() + output = ask_vlm(self.image, self.text).lower() + send_output( + "speak", + pa.array([output]), + ) + if "yes" in output: + send_output( + "control", + pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 0.0]), + ) + time.sleep(2) + send_output( + "control", + pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0]), + ) + elif "no" in output: + send_output( + "control", + pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 50.0]), + ) + time.sleep(2) + send_output( + "control", + pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]), + ) + + return DoraStatus.CONTINUE diff --git a/examples/hf-operator/config.py b/examples/hf-operator/config.py new file mode 100644 index 0000000000000000000000000000000000000000..400153e36c2164f65bc32e2663324b4d57f59fd6 --- /dev/null +++ 
b/examples/hf-operator/config.py @@ -0,0 +1,4 @@ +import dataclasses + +@dataclasses +Config: diff --git a/examples/hf-operator/constants.py b/examples/hf-operator/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..444852af5a053df0760572bfacb963638953dc05 --- /dev/null +++ b/examples/hf-operator/constants.py @@ -0,0 +1,33 @@ +import numpy as np + +# LOCATION = { +# "LIVING_ROOM": np.array([[0.0, 0.3], [-1.5, 0.7]]), +# "KITCHEN": np.array([[0.0, 0.5], [1.5, -1.0]]), +# } + +LOCATION = { + "HOME": { + "OFFICE": np.array([[1.0, 1.0]]), + "KITCHEN": np.array([[0.0, 1.0]]), + "LIVING_ROOM": np.array([[0.0, 0.3], [-1.5, 0.7]]), + }, + "OFFICE": {"KITCHEN": np.array([[0.0, 1.0]]), "HOME": np.array([0.0, 0.0])}, + "KITCHEN": { + "OFFICE": np.array([[1.0, 1.0]]), + "HOME": np.array([0.0, 0.0]), + }, +} + +# LOCATION = { +# "HOME": { +# "OFFICE": np.array([[0.0, 0.3], [-1.5, 0.7]]), +# "KITCHEN": np.array([[-1.0, -0.5], [1.5, 0.0], [1.5, 0.5]]), +# "LIVING_ROOM": np.array([[0.0, 0.3], [-1.5, 0.7]]), +# }, +# "OFFICE": { +# "KITCHEN": np.array([[0.0, 0.5], [1.5, -1.0]]), +# }, +# "KITCHEN": { +# "OFFICE": np.array([[-1.5, -0.5], [-1.5, 1.0]]), +# }, +# } diff --git a/examples/hf-operator/dataflow_basic.yml b/examples/hf-operator/dataflow_basic.yml new file mode 100644 index 0000000000000000000000000000000000000000..3aeece9c48afaf8b0bb249f46fe3407813e24e24 --- /dev/null +++ b/examples/hf-operator/dataflow_basic.yml @@ -0,0 +1,34 @@ +nodes: + - id: robot + operator: + python: ../operators/robot.py + inputs: + control: + source: keyboard/submitted + queue_size: 1 + tick: + source: dora/timer/millis/200 + queue_size: 1 + + - id: bot_webcam + custom: + source: ../operators/opencv_stream.py + outputs: + - image + + ### Camera + - id: plot_bot + operator: + python: ../operators/plot.py + inputs: + image: bot_webcam/image + keyboard_buffer: keyboard/buffer + user_message: keyboard/submitted + + - id: keyboard + custom: + source: ../operators/keyboard_op.py + outputs: + - buffer + - submitted + diff --git a/examples/hf-operator/dataflow_robot_basic.yml b/examples/hf-operator/dataflow_robot_basic.yml new file mode 100644 index 0000000000000000000000000000000000000000..a77120a60bfd85faa07ee911296cf7edcb59fcad --- /dev/null +++ b/examples/hf-operator/dataflow_robot_basic.yml @@ -0,0 +1,50 @@ +nodes: + - id: plot_bot + operator: + python: ../operators/plot.py + inputs: + image: webcam/image + position: robot/position + + - id: robot + operator: + python: ../operators/robot.py + inputs: + tick: dora/timer/millis/750 + planning_control: planning/control + outputs: + - control_reply + - position + + - id: webcam + custom: + source: ../operators/opencv_stream.py + outputs: + - image + + - id: policy + operator: + python: ../operators/policy.py + inputs: + init: dora/timer/millis/750 + goal_reached: planning/goal_reached + outputs: + - go_to + - reloaded + + - id: planning + operator: + python: ../operators/planning_op.py + inputs: + position: robot/position + control_reply: robot/control_reply + set_goal: policy/go_to + image: + source: webcam/image + queue_size: 1 + outputs: + - control + - goal_reached + + + diff --git a/examples/hf-operator/dataflow_robot_bunny.yml b/examples/hf-operator/dataflow_robot_bunny.yml new file mode 100644 index 0000000000000000000000000000000000000000..e389e7543b1ceb101781c1495b71c1032bc3345c --- /dev/null +++ b/examples/hf-operator/dataflow_robot_bunny.yml @@ -0,0 +1,78 @@ +nodes: +### Camera + # - id: rerun + # custom: + # source: dora-rerun + # inputs: 
+ # image: webcam/image + # textlog_llm: whisper/text_llm + # textlog_policy: whisper/text_policy + # envs: + # IMAGE_WIDTH: 1280 + # IMAGE_HEIGHT: 720 + # IMAGE_DEPTH: 3 + # RERUN_MEMORY_LIMIT: 10% + + # - id: robot + # custom: + # source: /home/peiji/anaconda3/envs/dora38/bin/python + # args: robot_minimize.py + # inputs: + # tick: dora/timer/millis/750 + # planning_control: planning/control + # led: whisper/led + # outputs: + # - control_reply + # - position + + - id: webcam + custom: + source: opencv_stream.py + outputs: + - image + + - id: whisper + custom: + source: whisper_op.py + inputs: + audio: dora/timer/millis/1000 + outputs: + - text_policy + - text_llm + - led + + - id: llm + operator: + python: llm_op.py + inputs: + text: whisper/text_llm + + - id: policy + operator: + python: policy.py + inputs: + speech: whisper/text_policy + # reached_kitchen: planning/reached_kitchen + # reached_living_room: planning/reached_living_room + # reached_office: planning/reached_office + outputs: + - go_to + + # - id: planning + # operator: + # python: planning_op.py + # inputs: + # # position: robot/position + # # control_reply: robot/control_reply + # set_goal: policy/go_to + # image: + # source: webcam/image + # queue_size: 1 + # outputs: + # - control + # - reached_kitchen + # - reached_living_room + # - reached_office + + + diff --git a/examples/hf-operator/dataflow_robot_vlm.yml b/examples/hf-operator/dataflow_robot_vlm.yml new file mode 100644 index 0000000000000000000000000000000000000000..f12087d99e05a1bbf3d21f35b3e6bd9aa6fd6467 --- /dev/null +++ b/examples/hf-operator/dataflow_robot_vlm.yml @@ -0,0 +1,104 @@ +nodes: +### Camera + - id: rerun + custom: + source: dora-rerun + inputs: + image: webcam/image + textlog_llm: whisper/text_llm + boxes2d: object_detection/bbox + # textlog_llm: whisper/text_llm + # textlog_policy: whisper/text_policy + envs: + IMAGE_WIDTH: 1920 + IMAGE_HEIGHT: 1080 + IMAGE_DEPTH: 3 + RERUN_MEMORY_LIMIT: 10% + + - id: robot + custom: + source: /home/peiji/anaconda3/envs/dora38/bin/python + args: robot.py + inputs: + tick: dora/timer/millis/750 + planning_control: planning/control + led: whisper/led + outputs: + - control_reply + - position + + - id: webcam + custom: + source: opencv_stream.py + outputs: + - image + envs: + IMAGE_WIDTH: 1920 + IMAGE_HEIGHT: 1080 + + + - id: object_detection + custom: + source: object_detection.py + inputs: + image: webcam/image + outputs: + - bbox + envs: + IMAGE_WIDTH: 1920 + IMAGE_HEIGHT: 1080 + + + - id: whisper + custom: + source: whisper_op.py + inputs: + audio: dora/timer/millis/1000 + outputs: + - text_policy + - text_llm + - led + + - id: llm + operator: + python: llm_op.py + inputs: + text: whisper/text_llm + + - id: policy + operator: + python: policy.py + inputs: + speech: whisper/text_policy + reached_kitchen: planning/reached_kitchen + reached_living_room: planning/reached_living_room + reached_office: planning/reached_office + outputs: + - go_to + + - id: planning + operator: + python: planning_op.py + inputs: + position: robot/position + control_reply: robot/control_reply + set_goal: policy/go_to + image: + source: webcam/image + queue_size: 1 + outputs: + - control + - reached_kitchen + - reached_living_room + - reached_office + + # - id: matplotlib + # custom: + # source: plot.py + # inputs: + # image: webcam/image + # # bbox: object_detection/bbox + # envs: + # IMAGE_WIDTH: 720 + # IMAGE_HEIGHT: 1280 + diff --git a/examples/hf-operator/dataflow_robot_vlm_minimize.yml 
b/examples/hf-operator/dataflow_robot_vlm_minimize.yml new file mode 100644 index 0000000000000000000000000000000000000000..52da971812f4408184cca303e66ca14cb07fe8f9 --- /dev/null +++ b/examples/hf-operator/dataflow_robot_vlm_minimize.yml @@ -0,0 +1,42 @@ +nodes: + - id: webcam + custom: + source: ../operators/opencv_stream.py + outputs: + - image + - id: idefics2 + operator: + python: ../operators/idefics2_op.py + inputs: + image: + source: webcam/image + queue_size: 1 + text: whisper/text + outputs: + - speak + - control + - id: robot + custom: + source: /home/peter/miniconda3/envs/robomaster/bin/python + args: ../operators/robot_minimize.py + inputs: + control: idefics2/control + - id: parler + operator: + python: ../operators/parler_op.py + inputs: + text: + source: idefics2/speak + queue_size: 1 + - id: plot_bot + operator: + python: ../operators/plot.py + inputs: + image: webcam/image + - id: whisper + operator: + python: ../operators/whisper_op.py + inputs: + audio: dora/timer/millis/1000 + outputs: + - text \ No newline at end of file diff --git a/examples/hf-operator/dataflow_vlm_basic.yml b/examples/hf-operator/dataflow_vlm_basic.yml new file mode 100644 index 0000000000000000000000000000000000000000..6e55b82c4654d63cd6bc4ddb46ab08a46cfea340 --- /dev/null +++ b/examples/hf-operator/dataflow_vlm_basic.yml @@ -0,0 +1,65 @@ +nodes: + # - id: plot + # custom: + # source: dora-rerun + # inputs: + # image: webcam/image + # textlog_whisper: whisper/text + # textlog_vlm: bunny/speak + # envs: + # IMAGE_WIDTH: 1280 + # IMAGE_HEIGHT: 720 + # IMAGE_DEPTH: 3 + # RERUN_MEMORY_LIMIT: 10% + + - id: bunny + operator: + source: /root/miniconda3/envs/idefices2/bin/python + python: bunny_demo.py + inputs: + image: + source: webcam/image + queue_size: 1 + text: whisper/text + outputs: + - speak + - control + + - id: robot + custom: + source: /root/miniconda3/envs/robomaster/bin/python + args: robot.py + inputs: + control: bunny/control + led: reload/led + + - id: webcam + custom: + source: opencv_stream.py + outputs: + - image + + - id: whisper + custom: + source: whisper_op.py + inputs: + audio: dora/timer/millis/1000 + outputs: + - text + + - id: parler + operator: + source: /root/miniconda3/envs/idefices2/bin/python + python: parler_op.py + inputs: + text: + source: bunny/speak + queue_size: 1 + + - id: reload + operator: + python: reload.py + inputs: + image: dora/timer/millis/500 + outputs: + - led \ No newline at end of file diff --git a/examples/hf-operator/dataflow_vlm_policy.yml b/examples/hf-operator/dataflow_vlm_policy.yml new file mode 100644 index 0000000000000000000000000000000000000000..ab4694d26431be335c757ca73eb6dc43cf10478b --- /dev/null +++ b/examples/hf-operator/dataflow_vlm_policy.yml @@ -0,0 +1,58 @@ +nodes: + - id: plot + custom: + source: dora-rerun + inputs: + image: webcam/image + textlog_whisper: whisper/text + envs: + IMAGE_WIDTH: 1280 + IMAGE_HEIGHT: 720 + IMAGE_DEPTH: 3 + RERUN_MEMORY_LIMIT: 10% + + - id: policy + operator: + python: ../operators/policy.py + inputs: + init: llm/init + reached_kitchen: robot/reached_kitchen + reached_living_room: robot/reached_living_room + reached_office: robot/reached_office + outputs: + - go_to + + - id: llm + operator: + python: ../operators/llm_op.py + inputs: + text: whisper/text + outputs: + - init + + - id: robot + custom: + source: /home/peter/miniconda3/envs/robomaster/bin/python + args: ../operators/robot_minimize.py + inputs: + # control: idefics2/control + go_to: policy/go_to + outputs: + - reached_kitchen + - 
reached_living_room + - reached_office + + - id: webcam + custom: + source: ../operators/opencv_stream.py + outputs: + - image + + - id: whisper + custom: + source: ../operators/whisper_op.py + inputs: + audio: dora/timer/millis/1000 + outputs: + - text + diff --git a/examples/hf-operator/idefics2_op.py b/examples/hf-operator/idefics2_op.py new file mode 100644 index 0000000000000000000000000000000000000000..561f46e18f932759a09bffcef5a2cfcd8255732d --- /dev/null +++ b/examples/hf-operator/idefics2_op.py @@ -0,0 +1,125 @@ +from dora import DoraStatus +import pyarrow as pa +from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig +import torch +import time +import awq_ext +CAMERA_WIDTH = 960 +CAMERA_HEIGHT = 540 +PROCESSOR = AutoProcessor.from_pretrained("/home/peiji/idefics2-8b-AWQ") +BAD_WORDS_IDS = PROCESSOR.tokenizer( + ["", ""], add_special_tokens=False +).input_ids +EOS_WORDS_IDS = PROCESSOR.tokenizer( + "", add_special_tokens=False +).input_ids + [PROCESSOR.tokenizer.eos_token_id] +model = AutoModelForVision2Seq.from_pretrained( + "/home/peiji/idefics2-8b-AWQ", + quantization_config=AwqConfig( + bits=4, + fuse_max_seq_len=4096, + modules_to_fuse={ + "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], + "mlp": ["gate_proj", "up_proj", "down_proj"], + "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], + "use_alibi": False, + "num_attention_heads": 32, + "num_key_value_heads": 8, + "hidden_size": 4096, + }, + ), + trust_remote_code=True, +).to("cuda") + + +def reset_awq_cache(model): + """ + Simple method to reset the AWQ fused modules cache + """ + from awq.modules.fused.attn import QuantAttentionFused + + for name, module in model.named_modules(): + if isinstance(module, QuantAttentionFused): + module.start_pos = 0 + + +def ask_vlm(image, instruction): + global model + prompts = [ + "User:", + image, + f"{instruction}.\n", + "Assistant:", + ] + inputs = {k: torch.tensor(v).to("cuda") for k, v in PROCESSOR(prompts).items()} + + generated_ids = model.generate( + **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=25, repetition_penalty=1.2 + ) + generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True) + reset_awq_cache(model) + return generated_texts[0].split("\nAssistant: ")[1] + + +class Operator: + def __init__(self): + self.state = "person" + self.last_output = False + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + image = ( + dora_event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)) + ) + + if self.state == "person": + output = ask_vlm(image, "Can you read the note?").lower() + print(output, flush=True) + if "coffee" in output or "tea" in output or "water" in output: + send_output( + "control", + pa.array([-3.0, 0.0, 0.0, 0.8, 0.0, 10.0, 180.0]), + ) + send_output( + "speak", + pa.array([output + ". Going to the kitchen."]), + ) + time.sleep(10) + self.state = "coffee" + self.last_output = False + elif not self.last_output: + self.last_output = True + send_output( + "speak", + pa.array([output]), + ) + time.sleep(4) + + elif self.state == "coffee": + output = ask_vlm(image, "Is there a person with a hands up?").lower() + print(output, flush=True) + if "yes" in output: + send_output( + "speak", + pa.array([output + ". 
Going to the office."]), + ) + send_output( + "control", + pa.array([2.0, 0.0, 0.0, 0.8, 0.0, 10.0, 0.0]), + ) + time.sleep(10) + self.state = "person" + self.last_output = False + elif not self.last_output: + self.last_output = True + send_output( + "speak", + pa.array([output]), + ) + time.sleep(4) + + return DoraStatus.CONTINUE diff --git a/examples/hf-operator/idefics2_op_demo.py b/examples/hf-operator/idefics2_op_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..8ecda87970d4f173377c83580078d300c9691cbf --- /dev/null +++ b/examples/hf-operator/idefics2_op_demo.py @@ -0,0 +1,123 @@ + +import sys +print("Python Version:", sys.version) +from dora import DoraStatus +import pyarrow as pa +from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig +import torch +import gc +import awq_ext + +CAMERA_WIDTH = 1280 +CAMERA_HEIGHT = 720 +PROCESSOR = AutoProcessor.from_pretrained("/home/peiji/idefics2-8b-AWQ") +BAD_WORDS_IDS = PROCESSOR.tokenizer( + ["", ""], add_special_tokens=False +).input_ids +EOS_WORDS_IDS = PROCESSOR.tokenizer( + "", add_special_tokens=False +).input_ids + [PROCESSOR.tokenizer.eos_token_id] +model = AutoModelForVision2Seq.from_pretrained( + "/home/peiji/idefics2-8b-AWQ", + quantization_config=AwqConfig( + bits=4, + fuse_max_seq_len=4096, + modules_to_fuse={ + "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], + "mlp": ["gate_proj", "up_proj", "down_proj"], + "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], + "use_alibi": False, + "num_attention_heads": 32, + "num_key_value_heads": 8, + "hidden_size": 4096, + }, + ), + trust_remote_code=True, +).to("cuda") +print("load idefics2 model finish") + + +def reset_awq_cache(model): + """ + Simple method to reset the AWQ fused modules cache + """ + from awq.modules.fused.attn import QuantAttentionFused + + for name, module in model.named_modules(): + if isinstance(module, QuantAttentionFused): + module.start_pos = 0 + + +def ask_vlm(image, instruction): + global model + prompts = [ + "User:", + image, + f"{instruction}.\n", + "Assistant:", + ] + inputs = {k: torch.tensor(v).to("cuda") for k, v in PROCESSOR(prompts).items()} + + generated_ids = model.generate( + **inputs, + bad_words_ids=BAD_WORDS_IDS, + max_new_tokens=25, + repetition_penalty=1.2, + ) + generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True) + reset_awq_cache(model) + + gc.collect() + torch.cuda.empty_cache() + return generated_texts[0].split("\nAssistant: ")[1] + + +import time + + +class Operator: + def __init__(self): + self.image = None + self.text = None + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + if dora_event["id"] == "image": + self.image = ( + dora_event["value"] + .to_numpy() + .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)) + ) + elif dora_event["id"] == "text": + self.text = dora_event["value"][0].as_py() + output = ask_vlm(self.image, self.text).lower() + send_output( + "speak", + pa.array([output]), + ) + if "yes" in output: + send_output( + "control", + pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 0.0]), + ) + time.sleep(2) + send_output( + "control", + pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0]), + ) + elif "no" in output: + send_output( + "control", + pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 50.0]), + ) + time.sleep(2) + send_output( + "control", + pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]), + ) + + return DoraStatus.CONTINUE diff --git a/examples/hf-operator/llm_op.py 
b/examples/hf-operator/llm_op.py new file mode 100644 index 0000000000000000000000000000000000000000..ce42aabebd5af272c8222c690ed0199c42c9efd7 --- /dev/null +++ b/examples/hf-operator/llm_op.py @@ -0,0 +1,234 @@ +from dora import DoraStatus +import pylcs +import os +import pyarrow as pa +from transformers import AutoModelForCausalLM, AutoTokenizer +import torch + +import gc # garbage collect library +import re +import time + +CHATGPT = False +MODEL_NAME_OR_PATH = "/home/peiji/deepseek-coder-6.7B-instruct-GPTQ/" + +CODE_MODIFIER_TEMPLATE = """ +### Instruction +Respond with one block of modified code only in ```python block. No explaination. + +```python +{code} +``` + +{user_message} + +### Response: +""" + + +model = AutoModelForCausalLM.from_pretrained( + MODEL_NAME_OR_PATH, + device_map="auto", + trust_remote_code=True, + revision="main", + max_length=1024, +).to("cuda:0") + + +tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True) + + +def extract_python_code_blocks(text): + """ + Extracts Python code blocks from the given text that are enclosed in triple backticks with a python language identifier. + + Parameters: + - text: A string that may contain one or more Python code blocks. + + Returns: + - A list of strings, where each string is a block of Python code extracted from the text. + """ + pattern = r"```python\n(.*?)\n```" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + pattern = r"```python\n(.*?)(?:\n```|$)" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + return [text] + else: + matches = [remove_last_line(matches[0])] + + return matches + + +def remove_last_line(python_code): + """ + Removes the last line from a given string of Python code. + + Parameters: + - python_code: A string representing Python source code. + + Returns: + - A string with the last line removed. + """ + lines = python_code.split("\n") # Split the string into lines + if lines: # Check if there are any lines to remove + lines.pop() # Remove the last line + return "\n".join(lines) # Join the remaining lines back into a string + + +def calculate_similarity(source, target): + """ + Calculate a similarity score between the source and target strings. + This uses the edit distance relative to the length of the strings. + """ + edit_distance = pylcs.edit_distance(source, target) + max_length = max(len(source), len(target)) + # Normalize the score by the maximum possible edit distance (the length of the longer string) + similarity = 1 - (edit_distance / max_length) + return similarity + + +def find_best_match_location(source_code, target_block): + """ + Find the best match for the target_block within the source_code by searching line by line, + considering blocks of varying lengths. 
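Example (an illustrative sketch): for a three-line source "a = 1" / "b = 2" / "c = 3" and a target_block of "b = 20", the middle line is the closest window, so the function returns the character span (6, 11), which replace_code_in_source below then uses to splice in the replacement block.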
+ """ + source_lines = source_code.split("\n") + target_lines = target_block.split("\n") + + best_similarity = 0 + best_start_index = 0 + best_end_index = -1 + + # Iterate over the source lines to find the best matching range for all lines in target_block + for start_index in range(len(source_lines) - len(target_lines) + 1): + for end_index in range(start_index + len(target_lines), len(source_lines) + 1): + current_window = "\n".join(source_lines[start_index:end_index]) + current_similarity = calculate_similarity(current_window, target_block) + if current_similarity > best_similarity: + best_similarity = current_similarity + best_start_index = start_index + best_end_index = end_index + + # Convert line indices back to character indices for replacement + char_start_index = len("\n".join(source_lines[:best_start_index])) + ( + 1 if best_start_index > 0 else 0 + ) + char_end_index = len("\n".join(source_lines[:best_end_index])) + + return char_start_index, char_end_index + + +def replace_code_in_source(source_code, replacement_block: str): + """ + Replace the best matching block in the source_code with the replacement_block, considering variable block lengths. + """ + replacement_block = extract_python_code_blocks(replacement_block)[0] + start_index, end_index = find_best_match_location(source_code, replacement_block) + if start_index != -1 and end_index != -1: + # Replace the best matching part with the replacement block + new_source = ( + source_code[:start_index] + replacement_block + source_code[end_index:] + ) + return new_source + else: + return source_code + + +class Operator: + def __init__(self) -> None: + self.policy_init = False + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + global model, tokenizer + if dora_event["type"] == "INPUT" and dora_event["id"] == "text": + input = dora_event["value"][0].as_py() + # Path to the current file + current_file_path = __file__ + + # Directory of the current file + current_directory = os.path.dirname(current_file_path) + path = current_directory + "/policy.py" + + with open(path, "r", encoding="utf8") as f: + code = f.read() + + user_message = input + start_llm = time.time() + + output = self.ask_llm( + CODE_MODIFIER_TEMPLATE.format(code=code, user_message=user_message) + ) + + source_code = replace_code_in_source(code, output) + print("response time:", time.time() - start_llm, flush=True) + + print("response: ", output, flush=True) + with open(path, "w") as file: + file.write(source_code) + + gc.collect() + torch.cuda.empty_cache() + + return DoraStatus.CONTINUE + + def ask_llm(self, prompt): + + # Generate output + # prompt = PROMPT_TEMPLATE.format(system_message=system_message, prompt=prompt)) + input = tokenizer(prompt, return_tensors="pt") + input_ids = input.input_ids.cuda() + + # add attention mask here + attention_mask = input.attention_mask.cuda() + + output = model.generate( + inputs=input_ids, + temperature=0.7, + do_sample=True, + top_p=0.95, + top_k=40, + max_new_tokens=512, + attention_mask=attention_mask, + eos_token_id=tokenizer.eos_token_id, + ) + # Get the tokens from the output, decode them, print them + + # Get text between im_start and im_end + return tokenizer.decode(output[0], skip_special_tokens=True)[len(prompt) :] + + +if __name__ == "__main__": + op = Operator() + + # Path to the current file + current_file_path = __file__ + + # Directory of the current file + current_directory = os.path.dirname(current_file_path) + + path = current_directory + "/policy.py" + with open(path, "r", 
encoding="utf8") as f: + raw = f.read() + + op.on_event( + { + "type": "INPUT", + "id": "text", + "value": pa.array( + [ + { + "path": path, + "user_message": "go to the office, and then, say I know that you work hard, so I brought some a chocolate, wait for 10 seconds, and then play the office song and then go to the kitchen,", + }, + ] + ), + "metadata": [], + }, + print, + ) diff --git a/examples/hf-operator/object_detection.py b/examples/hf-operator/object_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..bd2de2e9c5ae839a61895dc77853450ac09d9f9c --- /dev/null +++ b/examples/hf-operator/object_detection.py @@ -0,0 +1,121 @@ +# import numpy as np +# import pyarrow as pa +# from dora import Node +# from dora import DoraStatus +# from ultralytics import YOLO +# import cv2 +# pa.array([]) + +# CAMERA_WIDTH = 720 +# CAMERA_HEIGHT = 1280 + +# model = YOLO("/home/peiji/yolov8n.pt") +# node = Node() + +# # class Operator: +# # """ +# # Infering object from images +# # """ + +# # def on_event( +# # self, +# # dora_event, +# # send_output, +# # ) -> DoraStatus: +# # if dora_event["type"] == "INPUT": +# # frame = ( +# # dora_event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)) +# # ) +# # frame = frame[:, :, ::-1] # OpenCV image (BGR to RGB) +# # results = model(frame, verbose=False) # includes NMS +# # boxes = np.array(results[0].boxes.xyxy.cpu()) +# # conf = np.array(results[0].boxes.conf.cpu()) +# # label = np.array(results[0].boxes.cls.cpu()) +# # # concatenate them together +# # arrays = np.concatenate((boxes, conf[:, None], label[:, None]), axis=1) + +# # send_output("bbox", pa.array(arrays.ravel()), dora_event["metadata"]) + +# # return DoraStatus.CONTINUE +# for event in node: +# print("djieoajdsaosijoi") +# event_type = event["type"] +# if event_type == "INPUT": +# event_id = event["id"] +# if event_id == "image": +# print("[object detection] received image input") +# image = event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)) + +# frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) +# frame = frame[:, :, ::-1] # OpenCV image (BGR to RGB) +# results = model(frame) # includes NMS +# # Process results +# boxes = np.array(results[0].boxes.xywh.cpu()) +# conf = np.array(results[0].boxes.conf.cpu()) +# label = np.array(results[0].boxes.cls.cpu()) +# # concatenate them together +# arrays = np.concatenate((boxes, conf[:, None], label[:, None]), axis=1) + +# node.send_output("bbox", pa.array(arrays.ravel()), event["metadata"]) +# else: +# print("[object detection] ignoring unexpected input:", event_id) +# elif event_type == "STOP": +# print("[object detection] received stop") +# elif event_type == "ERROR": +# print("[object detection] error: ", event["error"]) +# else: +# print("[object detection] received unexpected event:", event_type) + + + + + + + + + +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import os +import cv2 +import numpy as np +from ultralytics import YOLO + +from dora import Node +import pyarrow as pa +node = Node() +model = YOLO("/home/peiji/yolov8n.pt") + + + +IMAGE_WIDTH = int(os.getenv("IMAGE_WIDTH", 1280)) +IMAGE_HEIGHT = int(os.getenv("IMAGE_HEIGHT", 720)) + +for event in node: + event_type = event["type"] + if event_type == "INPUT": + event_id = event["id"] + if event_id == "image": + print("[object detection] received image input") + image = event["value"].to_numpy().reshape((IMAGE_HEIGHT, IMAGE_WIDTH, 3)) + + frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + frame = frame[:, :, ::-1] # OpenCV image (BGR to 
RGB) + results = model(frame) # includes NMS + # Process results + boxes = np.array(results[0].boxes.xywh.cpu()) + conf = np.array(results[0].boxes.conf.cpu()) + label = np.array(results[0].boxes.cls.cpu()) + # concatenate them together + arrays = np.concatenate((boxes, conf[:, None], label[:, None]), axis=1) + + node.send_output("bbox", pa.array(arrays.ravel()), event["metadata"]) + else: + print("[object detection] ignoring unexpected input:", event_id) + elif event_type == "STOP": + print("[object detection] received stop") + elif event_type == "ERROR": + print("[object detection] error: ", event["error"]) + else: + print("[object detection] received unexpected event:", event_type) diff --git a/examples/hf-operator/opencv_stream.py b/examples/hf-operator/opencv_stream.py new file mode 100644 index 0000000000000000000000000000000000000000..9d454391f88154a47785120e351b41119ad01239 --- /dev/null +++ b/examples/hf-operator/opencv_stream.py @@ -0,0 +1,39 @@ +import cv2 +import pyarrow as pa +from dora import Node + +node = Node() +# TCP stream URL (replace with your stream URL) +# TCP_STREAM_URL = "tcp://192.168.2.1:40922" +STREAM_URL = "tcp://192.168.2.1:40921" + +# TCP_STREAM_URL = "/home/peiji/截图/截图 2024-02-22 10-48-33.png" + +# Global variables, change it to adapt your needs + + +CAMERA_WIDTH = 1920 +CAMERA_HEIGHT = 1080 + + + +# Create a VideoCapture object using the TCP stream URL +cap = cv2.VideoCapture(STREAM_URL) + +# Check if the VideoCapture object opened successfully +assert cap.isOpened(), "Error: Could not open video capture." +# print(cap.isOpened()) +while True: + # Read a frame from the stream + ret, frame = cap.read() + + if not ret: + break # Break the loop when no more frames are available + frame = cv2.resize(frame, (CAMERA_WIDTH, CAMERA_HEIGHT)) + + node.send_output("image", pa.array(frame.ravel())) + + +# Release the VideoCapture object and any OpenCV windows +cap.release() +cv2.destroyAllWindows() diff --git a/examples/hf-operator/parler_op.py b/examples/hf-operator/parler_op.py new file mode 100644 index 0000000000000000000000000000000000000000..fc287a0768a863a28829a304722868d7054daaa2 --- /dev/null +++ b/examples/hf-operator/parler_op.py @@ -0,0 +1,52 @@ +from parler_tts import ParlerTTSForConditionalGeneration +from transformers import AutoTokenizer +import soundfile as sf +import pygame +from dora import DoraStatus + +model = ParlerTTSForConditionalGeneration.from_pretrained( + "/mnt/c/parler-tts-mini-jenny-30H" +).to("cuda:0") +tokenizer = AutoTokenizer.from_pretrained("/mnt/c/parler-tts-mini-jenny-30H") + +pygame.mixer.init() + +input_ids = tokenizer( + "Jenny delivers her words quite expressively, in a very confined sounding environment with clear audio quality.", + return_tensors="pt", +).input_ids.to("cuda:0") + + +class Operator: + def on_event( + self, + dora_event, + send_output, + ): + if dora_event["type"] == "INPUT": + generation = model.generate( + input_ids=input_ids, + min_new_tokens=100, + prompt_input_ids=tokenizer( + dora_event["value"][0].as_py(), return_tensors="pt" + ).input_ids.to("cuda:0"), + ) + print(dora_event["value"][0].as_py(), flush=True) + sf.write( + f"parler_tts_out.wav", + generation.cpu().numpy().squeeze(), + model.config.sampling_rate, + ) + + pygame.mixer.music.load(f"parler_tts_out.wav") + pygame.mixer.music.play() + while pygame.mixer.get_busy(): + pass + + return DoraStatus.CONTINUE + + +# op = Operator() +# import pyarrow as pa + +# op.on_event({"type": "INPUT", "value": pa.array(["Hello, how are you?"])}, None) diff --git 
a/examples/hf-operator/planning_op.py b/examples/hf-operator/planning_op.py new file mode 100644 index 0000000000000000000000000000000000000000..28d2c36f4670b887f0ce833235bf98502ad9d397 --- /dev/null +++ b/examples/hf-operator/planning_op.py @@ -0,0 +1,189 @@ +import time +import numpy as np +import pyarrow as pa +from dora import DoraStatus +from constants import LOCATION + +CAMERA_WIDTH = 1920 +CAMERA_HEIGHT = 1080 + + +def check_clear_road(bboxes, image_width, goal_x): + """ + Find the x-coordinate of the midpoint of the largest gap along the x-axis where no bounding boxes overlap. + + Parameters: + - bboxes (np.array): A numpy array where each row represents a bounding box with + the format [min_x, min_y, max_x, max_y, confidence, label]. + - image_width (int): The width of the image in pixels. + + Returns: + - int: The x-coordinate of the midpoint of the largest gap where no bounding boxes overlap. + """ + if bboxes.size == 0: + # No bounding boxes, return the midpoint of the image as the largest gap + return goal_x + + events = [] + for bbox in bboxes: + min_x, max_x = bbox[0], bbox[2] + events.append((min_x, "enter")) + events.append((max_x, "exit")) + + # Include image boundaries as part of the events + events.append( + (0, "exit") + ) # Start of the image, considered an 'exit' point for logic simplicity + events.append( + (image_width, "enter") + ) # End of the image, considered an 'enter' point + + # Sort events, with exits before enters at the same position to ensure gap calculation correctness + events.sort(key=lambda x: (x[0], x[1] == "enter")) + + # Sweep line algorithm to find the largest gap + current_boxes = 1 + last_x = 0 + largest_gap = 0 + gap_start_x = None + largest_gap_mid = None # Midpoint of the largest gap + + for x, event_type in events: + if current_boxes == 0 and gap_start_x is not None: + # Calculate gap + gap = x - gap_start_x + gap_end_x = gap_start_x + x + if goal_x < gap_end_x and goal_x > gap_start_x: + return True + elif goal_x < gap_start_x: + return False + if event_type == "enter": + current_boxes += 1 + if current_boxes == 1: + gap_start_x = None # No longer in a gap + elif event_type == "exit": + current_boxes -= 1 + if current_boxes == 0: + gap_start_x = x # Start of a potential gap + + return False + + +class Operator: + def __init__(self): + self.bboxs = None + self.time = time.time() + self.position = [0, 0, 0] + self.waypoints = None + self.tf = np.array([[1, 0], [0, 1]]) + self.count = 0 + self.completed = True + self.image = None + self.goal = "" + self.current_location = "HOME" + + def on_event( + self, + dora_event: dict, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + id = dora_event["id"] + if id == "image": + value = dora_event["value"].to_numpy() + + self.image = value.reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)) + elif id == "control_reply": + value = dora_event["value"].to_numpy()[0] + if value == self.count: + self.completed = True + elif id == "set_goal": + self.goal = dora_event["value"][0].as_py() + print("got goal:", self.goal, flush=True) + + if len(dora_event["value"]) > 0: + if self.goal != "": + self.waypoints = LOCATION[self.current_location][self.goal] + + elif id == "position": + print("got position:", dora_event["value"], flush=True) + value = dora_event["value"].to_numpy() + [x, y, z] = value + self.position = [x, y, z] + if self.image is None: + print("no image", flush=True) + return DoraStatus.CONTINUE + ## No bounding box yet + if self.completed == False: + print("not completed", flush=True) + 
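# Handshake with the robot node: every "control" message sent further down carries self.count, and the robot reports the last executed count back on "control_reply"; planning stays paused here until that acknowledgement arrives and sets self.completed again.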
return DoraStatus.CONTINUE + + if self.waypoints is None: + print("no waypoint", flush=True) + return DoraStatus.CONTINUE + # Set Waypoints to None if goal reached + # Remove waypoints if completed + elif ( + self.waypoints.shape[0] == 1 + and np.linalg.norm(self.waypoints[0] - np.array([x, y])) < 0.2 + ): + print(f"goal {self.goal} reached", flush=True) + self.current_location = self.goal + send_output( + f"reached_{self.goal.lower()}", pa.array(self.image.ravel()) + ) + self.waypoints = None + return DoraStatus.CONTINUE + elif ( + self.waypoints.size > 0 + and np.linalg.norm(self.waypoints[0] - np.array([x, y])) < 0.1 + ): + self.waypoints = self.waypoints[1:] + print("removing waypoints", flush=True) + + z = np.deg2rad(z) + self.tf = np.array([[np.cos(z), -np.sin(z)], [np.sin(z), np.cos(z)]]) + goal = self.tf.dot(self.waypoints[0] - np.array([x, y])) + goal_camera_x = ( + CAMERA_WIDTH * np.arctan2(goal[1], goal[0]) / np.pi + ) + CAMERA_WIDTH / 2 + goal_angle = np.arctan2(goal[1], goal[0]) * 180 / np.pi + print( + "position", + [x, y], + "goal:", + goal, + "Goal angle: ", + np.arctan2(goal[1], goal[0]) * 180 / np.pi, + "z: ", + np.rad2deg(z), + "x: ", + goal_camera_x, + "count: ", + self.count, + flush=True, + ) + + self.count += 1 + self.completed = False + + message = pa.array( + [ + self.waypoints[0][0] - x, + self.waypoints[0][1] - y, + 0.0, # -goal_angle, + 0.8, + 0.0, # 50, + 10.0, + float(int(goal_angle)), + self.count, + ] + ) + print("sending:", message, flush=True) + send_output( + "control", + message, + dora_event["metadata"], + ) + + return DoraStatus.CONTINUE diff --git a/examples/hf-operator/plot.py b/examples/hf-operator/plot.py new file mode 100644 index 0000000000000000000000000000000000000000..1393770dd0f523aa39b32d62e433177e6eb41a0f --- /dev/null +++ b/examples/hf-operator/plot.py @@ -0,0 +1,206 @@ +import cv2 +import numpy as np +from dora import Node +from dora import DoraStatus + + +CAMERA_WIDTH = 720 +CAMERA_HEIGHT = 1280 + +FONT = cv2.FONT_HERSHEY_SIMPLEX + +writer = cv2.VideoWriter( + "output01.avi", + cv2.VideoWriter_fourcc(*"MJPG"), + 60, + (CAMERA_WIDTH, CAMERA_HEIGHT), +) + +GOAL_OBJECTIVES = [10, 0] + +import numpy as np + + +def find_largest_gap_midpoint(bboxes, image_width, goal_x): + """ + Find the x-coordinate of the midpoint of the largest gap along the x-axis where no bounding boxes overlap. + + Parameters: + - bboxes (np.array): A numpy array where each row represents a bounding box with + the format [min_x, min_y, max_x, max_y, confidence, label]. + - image_width (int): The width of the image in pixels. + + Returns: + - int: The x-coordinate of the midpoint of the largest gap where no bounding boxes overlap. 
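Example (an illustrative sketch): with a single detection spanning x = 300..400 on a 720-pixel-wide image and goal_x = 10, the goal lies inside the free range [0, 300], so the function returns goal_x itself rather than the gap midpoint.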
+ """ + if bboxes.size == 0: + # No bounding boxes, return the midpoint of the image as the largest gap + return image_width // 2 + + events = [] + for bbox in bboxes: + min_x, max_x = bbox[0], bbox[2] + events.append((min_x, "enter")) + events.append((max_x, "exit")) + + # Include image boundaries as part of the events + events.append( + (0, "exit") + ) # Start of the image, considered an 'exit' point for logic simplicity + events.append( + (image_width, "enter") + ) # End of the image, considered an 'enter' point + + # Sort events, with exits before enters at the same position to ensure gap calculation correctness + events.sort(key=lambda x: (x[0], x[1] == "enter")) + + # Sweep line algorithm to find the largest gap + current_boxes = 1 + last_x = 0 + largest_gap = 0 + gap_start_x = None + largest_gap_mid = None # Midpoint of the largest gap + + for x, event_type in events: + if current_boxes == 0 and gap_start_x is not None: + # Calculate gap + gap = x - gap_start_x + if gap > largest_gap: + largest_gap = gap + gap_end_x = gap_start_x + x + largest_gap_mid = (gap_start_x + x) // 2 + if goal_x < gap_end_x and goal_x > gap_start_x: + return goal_x + return largest_gap_mid + # elif goal_x > gap_end_x: + # return max(gap_end_x - 50, largest_gap_mid) + # elif goal_x < gap_start_x: + # return min(gap_start_x + 50, largest_gap_mid) + + if event_type == "enter": + current_boxes += 1 + if current_boxes == 1: + gap_start_x = None # No longer in a gap + elif event_type == "exit": + current_boxes -= 1 + if current_boxes == 0: + gap_start_x = x # Start of a potential gap + + return largest_gap_mid + + +class Operator: + """ + Plot image and bounding box + """ + + def __init__(self): + self.bboxs = [] + self.buffer = "" + self.submitted = [] + self.lines = [] + self.gap_x = CAMERA_WIDTH // 2 + self.position = [0, 0, 0] + + def on_event( + self, + dora_event, + send_output, + ): + if dora_event["type"] == "INPUT": + id = dora_event["id"] + value = dora_event["value"] + + if id == "position": + + value = dora_event["value"].to_numpy() + [x, y, z] = value + self.position = [x, y, z] + + if id == "image": + + image = ( + value.to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)).copy() + ) + cv2.resize(image, (CAMERA_HEIGHT * 2, CAMERA_WIDTH * 2)) + + cv2.putText( + image, self.buffer, (20, 14 + 15 * 25), FONT, 0.5, (190, 250, 0), 2 + ) + cv2.putText( + image, + f"pos: {self.position}", + (20, 20), + FONT, + 0.5, + (190, 250, 100), + 2, + ) + + i = 0 + for text in self.submitted[::-1]: + color = ( + (0, 255, 190) + if text["role"] == "user_message" + else (0, 190, 255) + ) + cv2.putText( + image, + text["content"], + ( + 20, + 14 + (13 - i) * 25, + ), + FONT, + 0.5, + color, + 2, + ) + i += 1 + writer.write(image) + cv2.resize(image, (CAMERA_HEIGHT * 3, CAMERA_WIDTH * 3)) + cv2.imshow("frame", image) + if cv2.waitKey(1) & 0xFF == ord("q"): + return DoraStatus.STOP + elif id == "keyboard_buffer": + self.buffer = value[0].as_py() + elif id == "bbox": + self.bboxs = value.to_numpy().reshape((-1, 6)) + + self.gap_x = find_largest_gap_midpoint( + self.bboxs, image_width=CAMERA_WIDTH, goal_x=10 + ) + elif "message" in id: + self.submitted += [ + { + "role": id, + "content": value[0] + .as_py() + .replace("\n", " ") + .replace("- ", ""), + } + ] + + return DoraStatus.CONTINUE + +operator = Operator() +node = Node() + +for event in node: + event_type = event["type"] + if event_type == "INPUT": + status = operator.on_event(event) + if status == DoraStatus.CONTINUE: + pass + elif status == DoraStatus.STOP: + 
print("plotter returned stop status") + break + elif event_type == "STOP": + print("received stop") + else: + print("received unexpected event:", event_type) + + +## Angle = Arctan Proj Object y / x + +## Relation linearire 0 - 60 ; 0 - CAMERA_WIDTH diff --git a/examples/hf-operator/policy.py b/examples/hf-operator/policy.py new file mode 100644 index 0000000000000000000000000000000000000000..c453a1896ad15d47cf8beb978dc6d31eb0fba081 --- /dev/null +++ b/examples/hf-operator/policy.py @@ -0,0 +1,29 @@ +import pyarrow as pa +from dora import DoraStatus +from utils import speak +from time import sleep + + +class Operator: + def __init__(self): + self.location = ["KITCHEN", "OFFICE"] + self.current_location = "KITCHEN" + + def speak(self, text: str): + speak(text) + + def on_event(self, event, send_output): + if event["type"] == "INPUT": + id = event["id"] + # On initialization + if id == "speech": + text: str = event["value"][0].as_py().lower() + if "stop" in text: + return DoraStatus.STOP + # send_output("go_to", pa.array([""])) + elif id == "reached_office": + pass + elif id == "reached_kitchen": + pass + + return DoraStatus.CONTINUE diff --git a/examples/hf-operator/reload.py b/examples/hf-operator/reload.py new file mode 100644 index 0000000000000000000000000000000000000000..1fb0192ee8f1f54fbf536dd0f3e804f59007527c --- /dev/null +++ b/examples/hf-operator/reload.py @@ -0,0 +1,18 @@ +from dora import DoraStatus +import pyarrow as pa + + +class Operator: + def __init__(self): + self.image = None + self.text = None + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + send_output("led", pa.array([255, 0, 0])) + pass + return DoraStatus.CONTINUE diff --git a/examples/hf-operator/requirements.txt b/examples/hf-operator/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..3aef37f5c26fb8769a969803cfa139b9adf6382b --- /dev/null +++ b/examples/hf-operator/requirements.txt @@ -0,0 +1,106 @@ +absl-py==2.1.0 +accelerate==0.31.0 +asttokens==2.4.1 +backcall==0.2.0 +cachetools==5.3.3 +certifi==2024.6.2 +charset-normalizer==3.3.2 +contourpy==1.1.1 +cycler==0.12.1 +decorator==5.1.1 +dora-rs==0.3.4 +executing==2.0.1 +filelock==3.14.0 +fonttools==4.50.0 +fsspec==2024.6.0 +gitdb==4.0.11 +GitPython==3.1.43 +google-auth==2.30.0 +google-auth-oauthlib==1.0.0 +grpcio==1.64.1 +huggingface-hub==0.23.3 +idna==3.7 +imageio==2.34.1 +importlib_metadata==7.1.0 +importlib_resources==6.3.1 +ipython==8.12.3 +jedi==0.19.1 +Jinja2==3.1.4 +kiwisolver==1.4.5 +Markdown==3.6 +MarkupSafe==2.1.5 +matplotlib==3.7.5 +matplotlib-inline==0.1.7 +maturin==1.6.0 +mpmath==1.3.0 +MyQR==2.3.1 +netaddr==1.3.0 +netifaces==0.11.0 +networkx==3.1 +numpy==1.24.4 +nvidia-cublas-cu12==12.1.3.1 +nvidia-cuda-cupti-cu12==12.1.105 +nvidia-cuda-nvrtc-cu12==12.1.105 +nvidia-cuda-runtime-cu12==12.1.105 +nvidia-cudnn-cu12==8.9.2.26 +nvidia-cufft-cu12==11.0.2.54 +nvidia-curand-cu12==10.3.2.106 +nvidia-cusolver-cu12==11.4.5.107 +nvidia-cusparse-cu12==12.1.0.106 +nvidia-nccl-cu12==2.20.5 +nvidia-nvjitlink-cu12==12.5.40 +nvidia-nvtx-cu12==12.1.105 +oauthlib==3.2.2 +opencv-python==4.10.0.82 +packaging==24.0 +pandas==2.0.3 +parso==0.8.4 +pexpect==4.9.0 +pickleshare==0.7.5 +pillow==10.2.0 +prompt_toolkit==3.0.46 +protobuf==5.27.1 +psutil==5.9.8 +ptyprocess==0.7.0 +pure-eval==0.2.2 +py-cpuinfo==9.0.0 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +Pygments==2.18.0 +pyparsing==3.1.2 +python-dateutil==2.9.0.post0 +pytz==2024.1 +PyYAML==6.0.1 +regex==2024.5.15 
+requests==2.32.3 +requests-oauthlib==2.0.0 +robomaster==0.1.1.68 +rsa==4.9 +safetensors==0.4.3 +scipy==1.10.1 +seaborn==0.13.2 +six==1.16.0 +smmap==5.0.1 +stack-data==0.6.3 +sympy==1.12.1 +tensorboard==2.14.0 +tensorboard-data-server==0.7.2 +thop==0.1.1.post2209072238 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.3.1 +torchvision==0.18.1 +tqdm==4.66.4 +traitlets==5.14.3 +transformers==4.40.0 +triton==2.3.1 +typing_extensions==4.12.1 +tzdata==2024.1 +ultralytics==8.2.28 +ultralytics-thop==0.2.7 +urllib3==2.2.1 +wcwidth==0.2.13 +Werkzeug==3.0.3 +whisper==1.1.10 +zipp==3.18.1 diff --git a/examples/hf-operator/requirements_robot.txt b/examples/hf-operator/requirements_robot.txt new file mode 100644 index 0000000000000000000000000000000000000000..3aef37f5c26fb8769a969803cfa139b9adf6382b --- /dev/null +++ b/examples/hf-operator/requirements_robot.txt @@ -0,0 +1,106 @@ +absl-py==2.1.0 +accelerate==0.31.0 +asttokens==2.4.1 +backcall==0.2.0 +cachetools==5.3.3 +certifi==2024.6.2 +charset-normalizer==3.3.2 +contourpy==1.1.1 +cycler==0.12.1 +decorator==5.1.1 +dora-rs==0.3.4 +executing==2.0.1 +filelock==3.14.0 +fonttools==4.50.0 +fsspec==2024.6.0 +gitdb==4.0.11 +GitPython==3.1.43 +google-auth==2.30.0 +google-auth-oauthlib==1.0.0 +grpcio==1.64.1 +huggingface-hub==0.23.3 +idna==3.7 +imageio==2.34.1 +importlib_metadata==7.1.0 +importlib_resources==6.3.1 +ipython==8.12.3 +jedi==0.19.1 +Jinja2==3.1.4 +kiwisolver==1.4.5 +Markdown==3.6 +MarkupSafe==2.1.5 +matplotlib==3.7.5 +matplotlib-inline==0.1.7 +maturin==1.6.0 +mpmath==1.3.0 +MyQR==2.3.1 +netaddr==1.3.0 +netifaces==0.11.0 +networkx==3.1 +numpy==1.24.4 +nvidia-cublas-cu12==12.1.3.1 +nvidia-cuda-cupti-cu12==12.1.105 +nvidia-cuda-nvrtc-cu12==12.1.105 +nvidia-cuda-runtime-cu12==12.1.105 +nvidia-cudnn-cu12==8.9.2.26 +nvidia-cufft-cu12==11.0.2.54 +nvidia-curand-cu12==10.3.2.106 +nvidia-cusolver-cu12==11.4.5.107 +nvidia-cusparse-cu12==12.1.0.106 +nvidia-nccl-cu12==2.20.5 +nvidia-nvjitlink-cu12==12.5.40 +nvidia-nvtx-cu12==12.1.105 +oauthlib==3.2.2 +opencv-python==4.10.0.82 +packaging==24.0 +pandas==2.0.3 +parso==0.8.4 +pexpect==4.9.0 +pickleshare==0.7.5 +pillow==10.2.0 +prompt_toolkit==3.0.46 +protobuf==5.27.1 +psutil==5.9.8 +ptyprocess==0.7.0 +pure-eval==0.2.2 +py-cpuinfo==9.0.0 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +Pygments==2.18.0 +pyparsing==3.1.2 +python-dateutil==2.9.0.post0 +pytz==2024.1 +PyYAML==6.0.1 +regex==2024.5.15 +requests==2.32.3 +requests-oauthlib==2.0.0 +robomaster==0.1.1.68 +rsa==4.9 +safetensors==0.4.3 +scipy==1.10.1 +seaborn==0.13.2 +six==1.16.0 +smmap==5.0.1 +stack-data==0.6.3 +sympy==1.12.1 +tensorboard==2.14.0 +tensorboard-data-server==0.7.2 +thop==0.1.1.post2209072238 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.3.1 +torchvision==0.18.1 +tqdm==4.66.4 +traitlets==5.14.3 +transformers==4.40.0 +triton==2.3.1 +typing_extensions==4.12.1 +tzdata==2024.1 +ultralytics==8.2.28 +ultralytics-thop==0.2.7 +urllib3==2.2.1 +wcwidth==0.2.13 +Werkzeug==3.0.3 +whisper==1.1.10 +zipp==3.18.1 diff --git a/examples/hf-operator/robot.py b/examples/hf-operator/robot.py new file mode 100644 index 0000000000000000000000000000000000000000..9a77ce080fc0a1b1201a719e5fcd3170e1ccac40 --- /dev/null +++ b/examples/hf-operator/robot.py @@ -0,0 +1,64 @@ +from robomaster import robot, led + +from dora import Node +from time import sleep +import numpy as np +import pyarrow as pa + + +CONN = "ap" + + +ep_robot = robot.Robot() +print("Initializing robot...", flush=True) +assert ep_robot.initialize(conn_type=CONN), "Could not initialize ep_robot" +assert 
ep_robot.camera.start_video_stream(display=False), "Could not start video stream" + +node = Node() +ep_robot.gimbal.recenter().wait_for_completed() +backlog = [] +last_control = "" +position = np.array([0.0, 0.0, 0.0]) +count = -1 +event = None +rgb = [0, 0, 0] + + +def wait(event): + if event is not None and not (event._event.isSet() and event.is_completed): + sleep(1) + + +for dora_event in node: + event_type = dora_event["type"] + if event_type == "INPUT": + if dora_event["id"] == "tick": + node.send_output("position", pa.array(position)) + node.send_output("control_reply", pa.array([count])) + + elif dora_event["id"] == "planning_control": + [x, y, z, xy_speed, z_speed, pitch, yaw, count] = dora_event[ + "value" + ].to_numpy() + if any([pitch, yaw]): + event = ep_robot.gimbal.moveto( + pitch=pitch, yaw=yaw, pitch_speed=60.0, yaw_speed=50.0 + ) + wait(event) + sleep(2) + if any([x, y, z]): + event = ep_robot.chassis.move( + x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed + ) + position = position + np.array([x, y, z]) + wait(event) + sleep(6) + + elif dora_event["id"] == "led": + [r, g, b] = dora_event["value"].to_numpy() + rgb = [r, g, b] + if rgb != rgb: + ep_robot.led.set_led( + comp=led.COMP_ALL, r=r, g=g, b=b, effect=led.EFFECT_ON + ) + rgb = rgb diff --git a/examples/hf-operator/robot_minimize.py b/examples/hf-operator/robot_minimize.py new file mode 100644 index 0000000000000000000000000000000000000000..215f90a4636f5de359a3b0db386d37eaacd67e32 --- /dev/null +++ b/examples/hf-operator/robot_minimize.py @@ -0,0 +1,74 @@ +from robomaster import robot +from time import sleep +from dora import Node +import numpy as np +import pyarrow as pa + + +def wait(event): + if event is not None and not (event._event.isSet() and event.is_completed): + sleep(1) + + +ep_robot = robot.Robot() +# assert ep_robot.initialize(conn_type="sta") + +assert ep_robot.initialize(conn_type="ap"), "Could not initialize ep_robot" +assert ep_robot.camera.start_video_stream(display=False), "Could not start video stream" +ep_robot.gimbal.recenter().wait_for_completed() + + +node = Node() + +current_location = "HOME" +LOCATION = { + "HOME": { + "KITCHEN": np.array([[0.5, 0.0, 0.0, 0.8, 0.0, 0.0, 0.0]]), + "OFFICE": np.array([[0.5, 0.0, 0.0, 0.8, 0.0, 0.0, 0.0]]), + }, + "KITCHEN": { + "OFFICE": np.array([[-0.5, 0.0, 0.0, 0.8, 0.0, 0.0, -180.0]]), + }, + "OFFICE": { + "KITCHEN": np.array([[-0.5, 0.0, 0.0, 0.8, 0.0, 0.0, -180.0]]), + }, +} + +for dora_event in node: + if dora_event["type"] == "INPUT" and dora_event["id"] == "control": + [x, y, z, xy_speed, z_speed, pitch, yaw] = dora_event["value"].to_numpy() + print(dora_event["value"].to_numpy(), flush=True) + + if any([pitch, yaw]): + event = ep_robot.gimbal.moveto( + pitch=pitch, yaw=yaw, pitch_speed=60.0, yaw_speed=50.0 + ) + wait(event) + sleep(2) + if any([x, y, z]): + event = ep_robot.chassis.move( + x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed + ) + wait(event) + sleep(6) + if dora_event["type"] == "INPUT" and dora_event["id"] == "go_to": + destination = dora_event["value"][0].as_py() + commands = LOCATION[current_location][destination] + for command in commands: + + [x, y, z, xy_speed, z_speed, pitch, yaw] = command + + if any([pitch, yaw]): + event = ep_robot.gimbal.moveto( + pitch=pitch, yaw=yaw, pitch_speed=60.0, yaw_speed=50.0 + ) + wait(event) + sleep(2) + if any([x, y, z]): + event = ep_robot.chassis.move( + x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed + ) + wait(event) + sleep(3) + node.send_output(f"reached_{destination.lower()}", 
pa.array([])) + current_location = destination diff --git a/examples/hf-operator/robot_minimize_ap.py b/examples/hf-operator/robot_minimize_ap.py new file mode 100644 index 0000000000000000000000000000000000000000..1c81b1107221c0615c65df76970cba81cc4c2273 --- /dev/null +++ b/examples/hf-operator/robot_minimize_ap.py @@ -0,0 +1,92 @@ +from robomaster import robot +import robomaster +from time import sleep +from dora import Node +import numpy as np +import pyarrow as pa +# import robomaster + + +def wait(event): + if event is not None and not (event._event.isSet() and event.is_completed): + sleep(1) + +# robomaster.config.LOCAL_IP_STR = "192.168.2.1" + +# ep_robot = robot.Robot() +# ep_robot.initialize(conn_type="ap") +# assert ep_robot.initialize(conn_type="sta") +# assert ep_robot.initialize(conn_type="ap"), "Could not initialize ep_robot" +# assert ep_robot.camera.start_video_stream(display=False), "Could not start video stream" +CONN = "ap" + + +ep_robot = robot.Robot() +robomaster.config.LOCAL_IP_STR="" +robomaster.config.ROBOT_IP_STR="192.168.2.1" +ep_robot.initialize(conn_type="ap") +print(ep_robot.get_sn()) +print(ep_robot.camera.audio_stream_addr) +ep_robot.camera.start_video_stream(display=False) +node = Node() +ep_robot.gimbal.recenter().wait_for_completed() + +TCP_STREAM_URL = "tcp://192.168.2.1:40922" + +# cap = cv2.VideoCapture(TCP_STREAM_URL) + +# Check if the VideoCapture object opened successfully +# assert cap.isOpened(), "Error: Could not open video capture." +# print(cap.isOpened()) +current_location = "HOME" +LOCATION = { + "HOME": { + "KITCHEN": np.array([[0.5, 0.0, 0.0, 0.8, 0.0, 0.0, 0.0]]), + "OFFICE": np.array([[0.5, 0.0, 0.0, 0.8, 0.0, 0.0, 0.0]]), + }, + "KITCHEN": { + "OFFICE": np.array([[-0.5, 0.0, 0.0, 0.8, 0.0, 0.0, -180.0]]), + }, + "OFFICE": { + "KITCHEN": np.array([[-0.5, 0.0, 0.0, 0.8, 0.0, 0.0, -180.0]]), + }, +} + +# for dora_event in node: +# if dora_event["type"] == "INPUT" and dora_event["id"] == "control": +# [x, y, z, xy_speed, z_speed, pitch, yaw] = dora_event["value"].to_numpy() +# print(dora_event["value"].to_numpy(), flush=True) + +# if any([pitch, yaw]): +# event = ep_robot.gimbal.moveto( +# pitch=pitch, yaw=yaw, pitch_speed=60.0, yaw_speed=50.0 +# ) +# wait(event) +# sleep(2) +# if any([x, y, z]): +# event = ep_robot.chassis.move( +# x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed +# ) +# wait(event) +# sleep(6) +# if dora_event["type"] == "INPUT" and dora_event["id"] == "go_to": +# destination = dora_event["value"][0].as_py() +# commands = LOCATION[current_location][destination] +# for command in commands: + +# [x, y, z, xy_speed, z_speed, pitch, yaw] = command + +# if any([pitch, yaw]): +# event = ep_robot.gimbal.moveto( +# pitch=pitch, yaw=yaw, pitch_speed=60.0, yaw_speed=50.0 +# ) +# wait(event) +# sleep(2) +# if any([x, y, z]): +# event = ep_robot.chassis.move( +# x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed +# ) +# wait(event) +# sleep(3) +# node.send_output(f"reached_{destination.lower()}", pa.array([])) +# current_location = destination diff --git a/examples/hf-operator/robot_minimize_wifi.py b/examples/hf-operator/robot_minimize_wifi.py new file mode 100644 index 0000000000000000000000000000000000000000..79e46986d305f709375790766497b7595cf96ae4 --- /dev/null +++ b/examples/hf-operator/robot_minimize_wifi.py @@ -0,0 +1,77 @@ +from robomaster import robot +from time import sleep +from dora import Node +import numpy as np +import pyarrow as pa +from MyQR import myqr +import sys +print(sys.version) + +def wait(event): + if event is 
not None and not (event._event.isSet() and event.is_completed): + sleep(1) + + +# helper = conn.ConnectionHelper() +# info = helper.build_qrcode_string(ssid="AIPARK", password="bjaipark") +# myqr.run(words=info) +ep_robot = robot.Robot() +assert ep_robot.initialize(conn_type="sta", sn="159CKAD0060BQN"), "Could not initialize ep_robot" +assert ep_robot.camera.start_video_stream(display=False), "Could not start video stream" +ep_robot.gimbal.recenter().wait_for_completed() + + +node = Node() + +current_location = "HOME" +LOCATION = { + "HOME": { + "KITCHEN": np.array([[0.5, 0.0, 0.0, 0.8, 0.0, 0.0, 0.0]]), + "OFFICE": np.array([[0.5, 0.0, 0.0, 0.8, 0.0, 0.0, 0.0]]), + }, + "KITCHEN": { + "OFFICE": np.array([[-0.5, 0.0, 0.0, 0.8, 0.0, 0.0, -180.0]]), + }, + "OFFICE": { + "KITCHEN": np.array([[-0.5, 0.0, 0.0, 0.8, 0.0, 0.0, -180.0]]), + }, +} + +for dora_event in node: + if dora_event["type"] == "INPUT" and dora_event["id"] == "control": + [x, y, z, xy_speed, z_speed, pitch, yaw] = dora_event["value"].to_numpy() + print(dora_event["value"].to_numpy(), flush=True) + + if any([pitch, yaw]): + event = ep_robot.gimbal.moveto( + pitch=pitch, yaw=yaw, pitch_speed=60.0, yaw_speed=50.0 + ) + wait(event) + sleep(2) + if any([x, y, z]): + event = ep_robot.chassis.move( + x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed + ) + wait(event) + sleep(6) + if dora_event["type"] == "INPUT" and dora_event["id"] == "go_to": + destination = dora_event["value"][0].as_py() + commands = LOCATION[current_location][destination] + for command in commands: + + [x, y, z, xy_speed, z_speed, pitch, yaw] = command + + if any([pitch, yaw]): + event = ep_robot.gimbal.moveto( + pitch=pitch, yaw=yaw, pitch_speed=60.0, yaw_speed=50.0 + ) + wait(event) + sleep(2) + if any([x, y, z]): + event = ep_robot.chassis.move( + x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed + ) + wait(event) + sleep(3) + node.send_output(f"reached_{destination.lower()}", pa.array([])) + current_location = destination diff --git a/examples/hf-operator/stream.py b/examples/hf-operator/stream.py new file mode 100644 index 0000000000000000000000000000000000000000..71e487d868e55c2bf95b7e019875426b8b771d6c --- /dev/null +++ b/examples/hf-operator/stream.py @@ -0,0 +1,31 @@ +from robomaster import camera,robot +import time +import robomaster +import cv2 +CONN = "ap" +ep_robot = robot.Robot() +print("Initializing robot...", flush=True) +ep_robot.initialize(conn_type=CONN) +# ep_robot.camera.start_video_stream(display=False) + + +ep_camera = ep_robot.camera +# ep_camera.start_video_stream(display=False) +ep_camera.start_video_stream(display=True, resolution=camera.STREAM_360P) +time.sleep(1000) +# ep_camera. 
+print(ep_robot.get_sn()) +print(ep_robot.camera.video_stream_addr) +CAMERA_WIDTH = 960 +CAMERA_HEIGHT = 540 +STREAM_URL = "tcp://192.168.2.1:40921" +cap = cv2.VideoCapture(STREAM_URL) +print(cap.isOpened()) + + + + +cap.release() + + +ep_robot.camera.stop_video_stream() diff --git a/examples/hf-operator/test_robomaster.py b/examples/hf-operator/test_robomaster.py new file mode 100644 index 0000000000000000000000000000000000000000..86f287eedfa6c465119b0cae5a06e85a32261907 --- /dev/null +++ b/examples/hf-operator/test_robomaster.py @@ -0,0 +1,10 @@ +from robomaster import robot, blaster, led +import robomaster + +CONN="sta" +robomaster.config.LOCAL_IP_STR = "" +robomaster.config.ROBOT_IP_STR = "192.168.2.1" +ep_robot = robot.Robot() +print("Initializing robot...") +assert ep_robot.initialize(conn_type=CONN), "Could not initialize ep_robot" +event = ep_robot.chassis.move(x=0, y=0, z=-90.0, xy_speed=0, z_speed=50) diff --git a/examples/hf-operator/test_sta.py b/examples/hf-operator/test_sta.py new file mode 100644 index 0000000000000000000000000000000000000000..b893de9e74409c9f101588391d2d4d3629606d9a --- /dev/null +++ b/examples/hf-operator/test_sta.py @@ -0,0 +1,14 @@ +import cv2 +from robomaster import robot, camera +import time + +ep_robot = robot.Robot() +ep_robot.initialize(conn_type="sta", sn="159CKAD0060BQN") +ep_camera = ep_robot.camera + +# Display the camera video feed for ten seconds +ep_camera.start_video_stream(display=True, resolution=camera.STREAM_360P) +time.sleep(1000) +ep_camera.stop_video_stream() + +ep_robot.close() \ No newline at end of file diff --git a/examples/hf-operator/utils.py b/examples/hf-operator/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..892e5536942d10b8905638f9ac78ef79e0b34eb9 --- /dev/null +++ b/examples/hf-operator/utils.py @@ -0,0 +1,149 @@ +import torch + + +from transformers import AutoProcessor, AutoModelForCausalLM, AwqConfig,AutoTokenizer + +import numpy as np +import pyttsx3 + +START_TO_COUCH = np.array([[0.5, 0], [0.5, 0.5]]).ravel() +COUCH_TO_KITCHEN = np.array([[0.5, -0.5], [1.0, -1.0]]).ravel() +KITCHEN_TO_START = np.array([[0.5, -0.5], [0, 0]]).ravel() + +engine = pyttsx3.init("espeak") +voices = engine.getProperty("voices") +engine.setProperty("voice", voices[3].id) + + +def speak(text): + print(f"said {text}", flush=True) + engine.say(text) + engine.runAndWait() + + +speak("hello") + +MODE = "fused_quantized" +DEVICE = "cuda" +# PROCESSOR = AutoProcessor.from_pretrained("/mnt/c/idefics2-8b-AWQ") +tokenizer = AutoTokenizer.from_pretrained( + '/home/peiji/Bunny-v1_0-2B-zh/', + trust_remote_code=True) +BAD_WORDS_IDS = tokenizer( + ["", ""], add_special_tokens=False +).input_ids +EOS_WORDS_IDS = tokenizer( + "", add_special_tokens=False +).input_ids + [tokenizer.eos_token_id] +# set device +device = 'cuda' # or cpu +torch.set_default_device(device) + +# create model +model = AutoModelForCausalLM.from_pretrained( + '/home/peiji/Bunny-v1_0-2B-zh/', + torch_dtype=torch.float16, # float32 for cpu + device_map='auto', + trust_remote_code=True + ) + +print("finished loading the Bunny model") +# # Load model +# if MODE == "regular": +# model = AutoModelForVision2Seq.from_pretrained( +# "/mnt/c/idefics2-8b-AWQ", +# torch_dtype=torch.float16, +# trust_remote_code=True, +# _attn_implementation="flash_attention_2", +# revision="3dc93be345d64fb6b1c550a233fe87ddb36f183d", +# ).to(DEVICE) +# elif MODE == "quantized": +# quant_path = "/mnt/c/idefics2-8b-AWQ" +# model = AutoModelForVision2Seq.from_pretrained( +# quant_path, trust_remote_code=True +# ).to(DEVICE) +# elif MODE ==
"fused_quantized": +# quant_path = "/mnt/c/idefics2-8b-AWQ" +# quantization_config = AwqConfig( +# bits=4, +# fuse_max_seq_len=4096, +# modules_to_fuse={ +# "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], +# "mlp": ["gate_proj", "up_proj", "down_proj"], +# "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"], +# "use_alibi": False, +# "num_attention_heads": 32, +# "num_key_value_heads": 8, +# "hidden_size": 4096, +# }, +# ) +# model = AutoModelForVision2Seq.from_pretrained( +# quant_path, quantization_config=quantization_config, trust_remote_code=True +# ).to(DEVICE) +# else: +# raise ValueError("Unknown mode") + + +# def reset_awq_cache(model): +# """ +# Simple method to reset the AWQ fused modules cache +# """ +# from awq.modules.fused.attn import QuantAttentionFused + +# for name, module in model.named_modules(): +# if isinstance(module, QuantAttentionFused): +# module.start_pos = 0 + + +def ask_vlm(image, instruction): + prompts = [ + "User:", + image, + f"{instruction}.\n", + "Assistant:", + ] + speak(instruction) + inputs = tokenizer(prompts) + inputs = {k: torch.tensor(v).to(DEVICE) for k, v in inputs.items()} + + generated_ids = model.generate( + **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=50 + ) + generated_texts = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) + + text = generated_texts[0].split("\nAssistant: ")[1] + # reset_awq_cache(model) + speak(text) + return text + + +# import requests +# import torch +# from PIL import Image +# from io import BytesIO + + +# def download_image(url): +# try: +# # Send a GET request to the URL to download the image +# response = requests.get(url) +# # Check if the request was successful (status code 200) +# if response.status_code == 200: +# # Open the image using PIL +# image = Image.open(BytesIO(response.content)) +# # Return the PIL image object +# return image +# else: +# print(f"Failed to download image. 
Status code: {response.status_code}") +# return None +# except Exception as e: +# print(f"An error occurred: {e}") +# return None + + +# # Create inputs +# image1 = download_image( +# "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" +# ) + +# print(ask_vlm(image1, "What is this?")) diff --git a/examples/hf-operator/webcam.py b/examples/hf-operator/webcam.py new file mode 100644 index 0000000000000000000000000000000000000000..3511dd10dbbe57a7a060d1be66df54b427a885da --- /dev/null +++ b/examples/hf-operator/webcam.py @@ -0,0 +1,78 @@ +import os +import time + +import cv2 +import numpy as np +import pyarrow as pa + +from dora import DoraStatus + +CAMERA_WIDTH = 960 +CAMERA_HEIGHT = 540 + +CAMERA_INDEX = int(os.getenv("CAMERA_INDEX", 0)) +CI = os.environ.get("CI") + +font = cv2.FONT_HERSHEY_SIMPLEX + + +class Operator: + """ + Sending image from webcam to the dataflow + """ + + def __init__(self): + self.video_capture = cv2.VideoCapture(CAMERA_INDEX) + # if self.video_capture.isOpened(): + # raise ValueError(f"Could open video capture for camera index {CAMERA_INDEX}") + self.start_time = time.time() + self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH) + self.video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT) + self.failure_count = 0 + + def on_event( + self, + dora_event: str, + send_output, + ) -> DoraStatus: + event_type = dora_event["type"] + if event_type == "INPUT": + ret, frame = self.video_capture.read() + if ret: + frame = cv2.resize(frame, (CAMERA_WIDTH, CAMERA_HEIGHT)) + self.failure_count = 0 + ## Push an error image in case the camera is not available. + else: + if self.failure_count > 10: + frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8) + cv2.putText( + frame, + "No Webcam was found at index %d" % (CAMERA_INDEX), + (int(30), int(30)), + font, + 0.75, + (255, 255, 255), + 2, + 1, + ) + else: + self.failure_count += 1 + return DoraStatus.CONTINUE + + send_output( + "image", + pa.array(frame.ravel()), + dora_event["metadata"], + ) + elif event_type == "STOP": + print("received stop") + else: + print("received unexpected event:", event_type) + + if time.time() - self.start_time < 20 or CI != "true": + return DoraStatus.CONTINUE + else: + return DoraStatus.STOP + + def __del__(self): + self.video_capture.release() diff --git a/examples/hf-operator/whisper_op.py b/examples/hf-operator/whisper_op.py new file mode 100644 index 0000000000000000000000000000000000000000..0f06e91373107250fe17a2ad5f64fd10b8a40035 --- /dev/null +++ b/examples/hf-operator/whisper_op.py @@ -0,0 +1,82 @@ +import pyarrow as pa +import whisper +from pynput import keyboard +from pynput.keyboard import Key, Events +from dora import Node + +import torch +import numpy as np +import pyarrow as pa +import sounddevice as sd +import gc # garbage collect library + +model = whisper.load_model("base") + +SAMPLE_RATE = 16000 +MAX_DURATION = 30 + +policy_init = True +audio_data = None +node = Node() + +for dora_event in node: + if dora_event["type"] == "INPUT": + ## Check for keyboard event + with keyboard.Events() as events: + event = events.get(1.0) + if ( + event is not None + and (event.key == Key.alt_r or event.key == Key.ctrl_r) + and isinstance(event, Events.Press) + ): + + ## Microphone + audio_data = sd.rec( + int(SAMPLE_RATE * MAX_DURATION), + samplerate=SAMPLE_RATE, + channels=1, + dtype=np.int16, + blocking=False, + ) + + elif ( + event is not None + and event.key == Key.alt_r + and isinstance(event, Events.Release) + ): + sd.stop() 
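# The recording below holds int16 PCM samples; dividing by 32768.0 rescales them to the float32 range [-1, 1] that Whisper expects, and pad_or_trim then fits the clip to the model's fixed 30-second input window.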
+ if audio_data is None: + continue + audio = audio_data.ravel().astype(np.float32) / 32768.0 + + ## Speech to text + audio = whisper.pad_or_trim(audio) + result = model.transcribe(audio, language="en") + node.send_output( + "text_llm", pa.array([result["text"]]), dora_event["metadata"] + ) + # send_output("led", pa.array([0, 0, 255])) + + gc.collect() + torch.cuda.empty_cache() + + elif ( + event is not None + and event.key == Key.ctrl_r + and isinstance(event, Events.Release) + ): + sd.stop() + if audio_data is None: + continue + audio = audio_data.ravel().astype(np.float32) / 32768.0 + + ## Speech to text + audio = whisper.pad_or_trim(audio) + result = model.transcribe(audio, language="en") + node.send_output( + "text_policy", pa.array([result["text"]]), dora_event["metadata"] + ) + # send_output("led", pa.array([0, 0, 255])) + + gc.collect() + torch.cuda.empty_cache() diff --git a/examples/multiple-daemons/dataflow.yml b/examples/multiple-daemons/dataflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..363b98bbaf9e3d09988acd694660ce88f726c63e --- /dev/null +++ b/examples/multiple-daemons/dataflow.yml @@ -0,0 +1,31 @@ +nodes: + - id: rust-node + _unstable_deploy: + machine: A + custom: + build: cargo build -p multiple-daemons-example-node + source: ../../target/debug/multiple-daemons-example-node + inputs: + tick: dora/timer/millis/10 + outputs: + - random + - id: runtime-node + _unstable_deploy: + machine: A + operators: + - id: rust-operator + build: cargo build -p multiple-daemons-example-operator + shared-library: ../../target/debug/multiple_daemons_example_operator + inputs: + tick: dora/timer/millis/100 + random: rust-node/random + outputs: + - status + - id: rust-sink + _unstable_deploy: + machine: B + custom: + build: cargo build -p multiple-daemons-example-sink + source: ../../target/debug/multiple-daemons-example-sink + inputs: + message: runtime-node/rust-operator/status diff --git a/examples/multiple-daemons/node/Cargo.toml b/examples/multiple-daemons/node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..1175c80da7222004b58d1ee470b001f832675145 --- /dev/null +++ b/examples/multiple-daemons/node/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "multiple-daemons-example-node" +version.workspace = true +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +dora-node-api = { workspace = true, features = ["tracing"] } +eyre = "0.6.8" +futures = "0.3.21" +rand = "0.8.5" +tokio = { version = "1.24.2", features = ["rt", "macros"] } diff --git a/examples/multiple-daemons/node/src/main.rs b/examples/multiple-daemons/node/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..36f42d578bf821676320546b1fdcf67d00942b45 --- /dev/null +++ b/examples/multiple-daemons/node/src/main.rs @@ -0,0 +1,35 @@ +use dora_node_api::{self, dora_core::config::DataId, DoraNode, Event, IntoArrow}; + +fn main() -> eyre::Result<()> { + println!("hello"); + + let output = DataId::from("random".to_owned()); + + let (mut node, mut events) = DoraNode::init_from_env()?; + + for i in 0..100 { + let event = match events.recv() { + Some(input) => input, + None => break, + }; + + match event { + Event::Input { + id, + metadata, + data: _, + } => match id.as_str() { + "tick" => { + let random: u64 = rand::random(); + println!("tick {i}, sending {random:#x}"); + node.send_output(output.clone(), metadata.parameters, random.into_arrow())?; + } + other => 
eprintln!("Ignoring unexpected input `{other}`"), + }, + Event::Stop => println!("Received manual stop"), + other => eprintln!("Received unexpected input: {other:?}"), + } + } + + Ok(()) +} diff --git a/examples/multiple-daemons/operator/Cargo.toml b/examples/multiple-daemons/operator/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..f7ed41fb5cf56e9d2be4aa25235325ce4ac11da4 --- /dev/null +++ b/examples/multiple-daemons/operator/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "multiple-daemons-example-operator" +version.workspace = true +edition = "2021" +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[lib] +crate-type = ["cdylib"] + +[dependencies] +dora-operator-api = { workspace = true } diff --git a/examples/multiple-daemons/operator/src/lib.rs b/examples/multiple-daemons/operator/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..d18138c53d14ee6de5f41bffdf35bf18b68b9a1e --- /dev/null +++ b/examples/multiple-daemons/operator/src/lib.rs @@ -0,0 +1,52 @@ +#![warn(unsafe_op_in_unsafe_fn)] + +use dora_operator_api::{ + register_operator, DoraOperator, DoraOutputSender, DoraStatus, Event, IntoArrow, +}; + +register_operator!(ExampleOperator); + +#[derive(Debug, Default)] +struct ExampleOperator { + ticks: usize, +} + +impl DoraOperator for ExampleOperator { + fn on_event( + &mut self, + event: &Event, + output_sender: &mut DoraOutputSender, + ) -> Result { + match event { + Event::Input { id, data } => match *id { + "tick" => { + self.ticks += 1; + } + "random" => { + let data = u64::try_from(data) + .map_err(|err| format!("expected u64 message: {err}"))?; + + let output = format!( + "operator received random value {data:#x} after {} ticks", + self.ticks + ); + output_sender.send("status".into(), output.into_arrow())?; + } + other => eprintln!("ignoring unexpected input {other}"), + }, + Event::Stop => {} + Event::InputClosed { id } => { + println!("input `{id}` was closed"); + if *id == "random" { + println!("`random` input was closed -> exiting"); + return Ok(DoraStatus::Stop); + } + } + other => { + println!("received unknown event {other:?}"); + } + } + + Ok(DoraStatus::Continue) + } +} diff --git a/examples/multiple-daemons/run.rs b/examples/multiple-daemons/run.rs new file mode 100644 index 0000000000000000000000000000000000000000..32a695cc5ddd01e9c6d2942703b532ff94aeb4a7 --- /dev/null +++ b/examples/multiple-daemons/run.rs @@ -0,0 +1,229 @@ +use dora_coordinator::{ControlEvent, Event}; +use dora_core::{ + descriptor::Descriptor, + topics::{ + ControlRequest, ControlRequestReply, DataflowId, DORA_COORDINATOR_PORT_CONTROL_DEFAULT, + DORA_COORDINATOR_PORT_DEFAULT, + }, +}; +use dora_tracing::set_up_tracing; +use eyre::{bail, Context}; + +use std::{ + collections::BTreeSet, + net::{IpAddr, Ipv4Addr, SocketAddr}, + path::Path, + time::Duration, +}; +use tokio::{ + sync::{ + mpsc::{self, Sender}, + oneshot, + }, + task::JoinSet, +}; +use tokio_stream::wrappers::ReceiverStream; +use uuid::Uuid; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("multiple-daemon-runner").wrap_err("failed to set up tracing subscriber")?; + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + let dataflow = Path::new("dataflow.yml"); + build_dataflow(dataflow).await?; + + let (coordinator_events_tx, coordinator_events_rx) = mpsc::channel(1); 
+ let coordinator_bind = SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), + DORA_COORDINATOR_PORT_DEFAULT, + ); + let coordinator_control_bind = SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), + DORA_COORDINATOR_PORT_CONTROL_DEFAULT, + ); + let (coordinator_port, coordinator) = dora_coordinator::start( + coordinator_bind, + coordinator_control_bind, + ReceiverStream::new(coordinator_events_rx), + ) + .await?; + let coordinator_addr = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), coordinator_port); + let daemon_a = run_daemon(coordinator_addr.to_string(), "A"); + let daemon_b = run_daemon(coordinator_addr.to_string(), "B"); + + tracing::info!("Spawning coordinator and daemons"); + let mut tasks = JoinSet::new(); + tasks.spawn(coordinator); + tasks.spawn(daemon_a); + tasks.spawn(daemon_b); + + tracing::info!("waiting until daemons are connected to coordinator"); + let mut retries = 0; + loop { + let connected_machines = connected_machines(&coordinator_events_tx).await?; + if connected_machines.contains("A") && connected_machines.contains("B") { + break; + } else if retries > 20 { + bail!("daemon not connected after {retries} retries"); + } else { + std::thread::sleep(Duration::from_millis(500)); + retries += 1 + } + } + + tracing::info!("starting dataflow"); + let uuid = start_dataflow(dataflow, &coordinator_events_tx).await?; + tracing::info!("started dataflow under ID `{uuid}`"); + + let running = running_dataflows(&coordinator_events_tx).await?; + if !running.iter().map(|d| d.uuid).any(|id| id == uuid) { + bail!("dataflow `{uuid}` is not running"); + } + + tracing::info!("waiting for dataflow `{uuid}` to finish"); + let mut retries = 0; + loop { + let running = running_dataflows(&coordinator_events_tx).await?; + if running.is_empty() { + break; + } else if retries > 100 { + bail!("dataflow not finished after {retries} retries"); + } else { + tracing::debug!("not done yet"); + std::thread::sleep(Duration::from_millis(500)); + retries += 1 + } + } + tracing::info!("dataflow `{uuid}` finished, destroying coordinator"); + destroy(&coordinator_events_tx).await?; + + tracing::info!("joining tasks"); + while let Some(res) = tasks.join_next().await { + res.unwrap()?; + } + + tracing::info!("done"); + Ok(()) +} + +async fn start_dataflow( + dataflow: &Path, + coordinator_events_tx: &Sender, +) -> eyre::Result { + let dataflow_descriptor = Descriptor::read(dataflow) + .await + .wrap_err("failed to read yaml dataflow")?; + let working_dir = dataflow + .canonicalize() + .context("failed to canonicalize dataflow path")? + .parent() + .ok_or_else(|| eyre::eyre!("dataflow path has no parent dir"))? 
+ .to_owned(); + dataflow_descriptor + .check(&working_dir) + .wrap_err("could not validate yaml")?; + + let (reply_sender, reply) = oneshot::channel(); + coordinator_events_tx + .send(Event::Control(ControlEvent::IncomingRequest { + request: ControlRequest::Start { + dataflow: dataflow_descriptor, + local_working_dir: working_dir, + name: None, + }, + reply_sender, + })) + .await?; + let result = reply.await??; + let uuid = match result { + ControlRequestReply::DataflowStarted { uuid } => uuid, + ControlRequestReply::Error(err) => bail!("{err}"), + other => bail!("unexpected start dataflow reply: {other:?}"), + }; + Ok(uuid) +} + +async fn connected_machines( + coordinator_events_tx: &Sender, +) -> eyre::Result> { + let (reply_sender, reply) = oneshot::channel(); + coordinator_events_tx + .send(Event::Control(ControlEvent::IncomingRequest { + request: ControlRequest::ConnectedMachines, + reply_sender, + })) + .await?; + let result = reply.await??; + let machines = match result { + ControlRequestReply::ConnectedMachines(machines) => machines, + ControlRequestReply::Error(err) => bail!("{err}"), + other => bail!("unexpected start dataflow reply: {other:?}"), + }; + Ok(machines) +} + +async fn running_dataflows(coordinator_events_tx: &Sender) -> eyre::Result> { + let (reply_sender, reply) = oneshot::channel(); + coordinator_events_tx + .send(Event::Control(ControlEvent::IncomingRequest { + request: ControlRequest::List, + reply_sender, + })) + .await?; + let result = reply.await??; + let dataflows = match result { + ControlRequestReply::DataflowList { dataflows } => dataflows, + ControlRequestReply::Error(err) => bail!("{err}"), + other => bail!("unexpected start dataflow reply: {other:?}"), + }; + Ok(dataflows) +} + +async fn destroy(coordinator_events_tx: &Sender) -> eyre::Result<()> { + let (reply_sender, reply) = oneshot::channel(); + coordinator_events_tx + .send(Event::Control(ControlEvent::IncomingRequest { + request: ControlRequest::Destroy, + reply_sender, + })) + .await?; + let result = reply.await??; + match result { + ControlRequestReply::DestroyOk => Ok(()), + ControlRequestReply::Error(err) => bail!("{err}"), + other => bail!("unexpected start dataflow reply: {other:?}"), + } +} + +async fn build_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--").arg("build").arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to build dataflow"); + }; + Ok(()) +} + +async fn run_daemon(coordinator: String, machine_id: &str) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--machine-id") + .arg(machine_id) + .arg("--coordinator-addr") + .arg(coordinator); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} diff --git a/examples/multiple-daemons/sink/Cargo.toml b/examples/multiple-daemons/sink/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..ed0823bd149f46fd0791bab42102b02e10e24872 --- /dev/null +++ b/examples/multiple-daemons/sink/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "multiple-daemons-example-sink" +version.workspace = true +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] 
+dora-node-api = { workspace = true, features = ["tracing"] }
+eyre = "0.6.8"
diff --git a/examples/multiple-daemons/sink/src/main.rs b/examples/multiple-daemons/sink/src/main.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e180af0816c21ea20277ab67bf94f33ea4c628df
--- /dev/null
+++ b/examples/multiple-daemons/sink/src/main.rs
@@ -0,0 +1,38 @@
+use dora_node_api::{self, DoraNode, Event};
+use eyre::{bail, Context};
+
+fn main() -> eyre::Result<()> {
+    let (_node, mut events) = DoraNode::init_from_env()?;
+
+    while let Some(event) = events.recv() {
+        match event {
+            Event::Input {
+                id,
+                metadata: _,
+                data,
+            } => match id.as_str() {
+                "message" => {
+                    let received_string: &str =
+                        TryFrom::try_from(&data).context("expected string message")?;
+                    println!("sink received message: {}", received_string);
+                    if !received_string.starts_with("operator received random value ") {
+                        bail!("unexpected message format (should start with 'operator received random value')")
+                    }
+                    if !received_string.ends_with(" ticks") {
+                        bail!("unexpected message format (should end with 'ticks')")
+                    }
+                }
+                other => eprintln!("Ignoring unexpected input `{other}`"),
+            },
+            Event::Stop => {
+                println!("Received manual stop");
+            }
+            Event::InputClosed { id } => {
+                println!("Input `{id}` was closed");
+            }
+            other => eprintln!("Received unexpected input: {other:?}"),
+        }
+    }
+
+    Ok(())
+}
diff --git a/examples/python-dataflow/.gitignore b/examples/python-dataflow/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..eede66d83f98ba9915ece45358a792562b5aedab
--- /dev/null
+++ b/examples/python-dataflow/.gitignore
@@ -0,0 +1 @@
+*.pt
\ No newline at end of file
diff --git a/examples/python-dataflow/README.md b/examples/python-dataflow/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2211a7217fb396a00a3fece717f799100d50a106
--- /dev/null
+++ b/examples/python-dataflow/README.md
@@ -0,0 +1,25 @@
+# Python Dataflow Example
+
+This example shows how to create and connect dora operators and custom nodes in Python.
+
+## Overview
+
+The [`dataflow.yml`](./dataflow.yml) defines a simple dataflow graph with the following three nodes:
+
+- a webcam node that connects to your webcam and feeds the dataflow with webcam frames as JPEG-compressed byte arrays.
+- an object detection node that applies a YOLO model (`yolov8n` from the `ultralytics` package) to the webcam image. The output is, for each detected object, its bounding box, confidence, and class. More background on YOLO: https://pytorch.org/hub/ultralytics_yolov5/
+- a window plotting node that retrieves the webcam image and the bounding boxes and draws them together.
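+
+For orientation, a custom node is just a Python script that creates a `Node`, iterates over its events, and sends outputs. Below is a minimal sketch, not part of this dataflow: the `frame_size` output id is made up for illustration, and it only reuses the `dora` and `pyarrow` APIs that the actual nodes below rely on.
+
+```python
+import pyarrow as pa
+from dora import Node
+
+node = Node()
+
+for event in node:
+    if event["type"] == "INPUT" and event["id"] == "image":
+        # `value` holds the JPEG-encoded frame sent by the webcam node
+        jpeg_bytes = event["value"].to_numpy()
+        # forward a derived value, reusing the input's metadata
+        node.send_output("frame_size", pa.array([len(jpeg_bytes)]), event["metadata"])
+    elif event["type"] == "STOP":
+        break
+```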
+ +## Getting started + +```bash +cargo run --example python-dataflow +``` + +## Run the dataflow as a standalone + +- Start the `dora-daemon`: + +``` +../../target/release/dora-daemon --run-dataflow dataflow.yml +``` diff --git a/examples/python-dataflow/dataflow.yml b/examples/python-dataflow/dataflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..612ce4107b5bf6e6bce02088753ba203ae65d394 --- /dev/null +++ b/examples/python-dataflow/dataflow.yml @@ -0,0 +1,25 @@ +nodes: + - id: webcam + custom: + source: ./webcam.py + inputs: + tick: + source: dora/timer/millis/50 + queue_size: 1000 + outputs: + - image + + - id: object_detection + custom: + source: ./object_detection.py + inputs: + image: webcam/image + outputs: + - bbox + + - id: plot + custom: + source: ./plot.py + inputs: + image: webcam/image + bbox: object_detection/bbox diff --git a/examples/python-dataflow/object_detection.py b/examples/python-dataflow/object_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..70a0e712dda428f0dd999c3d24599802c1e9f6f8 --- /dev/null +++ b/examples/python-dataflow/object_detection.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import cv2 +import numpy as np +from ultralytics import YOLO + +from dora import Node +import pyarrow as pa + +model = YOLO("yolov8n.pt") + +node = Node() + +for event in node: + event_type = event["type"] + if event_type == "INPUT": + event_id = event["id"] + if event_id == "image": + print("[object detection] received image input") + frame = event["value"].to_numpy() + frame = cv2.imdecode(frame, -1) + frame = frame[:, :, ::-1] # OpenCV image (BGR to RGB) + results = model(frame) # includes NMS + # Process results + boxes = np.array(results[0].boxes.xyxy.cpu()) + conf = np.array(results[0].boxes.conf.cpu()) + label = np.array(results[0].boxes.cls.cpu()) + # concatenate them together + arrays = np.concatenate((boxes, conf[:, None], label[:, None]), axis=1) + + node.send_output("bbox", pa.array(arrays.ravel()), event["metadata"]) + else: + print("[object detection] ignoring unexpected input:", event_id) + elif event_type == "STOP": + print("[object detection] received stop") + elif event_type == "ERROR": + print("[object detection] error: ", event["error"]) + else: + print("[object detection] received unexpected event:", event_type) diff --git a/examples/python-dataflow/plot.py b/examples/python-dataflow/plot.py new file mode 100644 index 0000000000000000000000000000000000000000..035fc41d47ed59a4e3081be92101ae5ca9c212b4 --- /dev/null +++ b/examples/python-dataflow/plot.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import os +from dora import Node +from dora import DoraStatus + +import cv2 +import numpy as np +from utils import LABELS + +CI = os.environ.get("CI") + +font = cv2.FONT_HERSHEY_SIMPLEX + + +class Plotter: + """ + Plot image and bounding box + """ + + def __init__(self): + self.image = [] + self.bboxs = [] + + def on_input( + self, + dora_input, + ) -> DoraStatus: + """ + Put image and bounding box on cv2 window. 
+ + Args: + dora_input["id"] (str): Id of the dora_input declared in the yaml configuration + dora_input["value"] (arrow array): message of the dora_input + """ + if dora_input["id"] == "image": + frame = dora_input["value"].to_numpy() + frame = cv2.imdecode(frame, -1) + self.image = frame + + elif dora_input["id"] == "bbox" and len(self.image) != 0: + bboxs = dora_input["value"].to_numpy() + self.bboxs = np.reshape(bboxs, (-1, 6)) + for bbox in self.bboxs: + [ + min_x, + min_y, + max_x, + max_y, + confidence, + label, + ] = bbox + cv2.rectangle( + self.image, + (int(min_x), int(min_y)), + (int(max_x), int(max_y)), + (0, 255, 0), + 2, + ) + + cv2.putText( + self.image, + LABELS[int(label)] + f", {confidence:0.2f}", + (int(max_x), int(max_y)), + font, + 0.75, + (0, 255, 0), + 2, + 1, + ) + + if CI != "true": + cv2.imshow("frame", self.image) + if cv2.waitKey(1) & 0xFF == ord("q"): + return DoraStatus.STOP + + return DoraStatus.CONTINUE + + +plotter = Plotter() +node = Node() + +for event in node: + event_type = event["type"] + if event_type == "INPUT": + status = plotter.on_input(event) + if status == DoraStatus.CONTINUE: + pass + elif status == DoraStatus.STOP: + print("plotter returned stop status") + break + elif event_type == "STOP": + print("received stop") + else: + print("received unexpected event:", event_type) diff --git a/examples/python-dataflow/requirements.txt b/examples/python-dataflow/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a8f02b0a09db304d89be0c5d157906cf92001ef7 --- /dev/null +++ b/examples/python-dataflow/requirements.txt @@ -0,0 +1,47 @@ +# YOLOv5 requirements +# Usage: pip install -r requirements.txt + +# Base ---------------------------------------- +ultralytics +gitpython +ipython # interactive notebook +matplotlib>=3.2.2 +numpy>=1.18.5 +opencv-python>=4.1.1 +Pillow>=7.1.2 +psutil # system resources +PyYAML>=5.3.1 +requests>=2.23.0 +scipy>=1.4.1 +thop>=0.1.1 # FLOPs computation +torch # see https://pytorch.org/get-started/locally (recommended) +torchvision +tqdm>=4.64.0 + +# Logging ------------------------------------- +tensorboard>=2.4.1 +# wandb +# clearml + +# Plotting ------------------------------------ +pandas>=1.1.4 +seaborn>=0.11.0 + +# Export -------------------------------------- +# coremltools>=5.2 # CoreML export +# onnx>=1.9.0 # ONNX export +# onnx-simplifier>=0.4.1 # ONNX simplifier +# nvidia-pyindex # TensorRT export +# nvidia-tensorrt # TensorRT export +# scikit-learn==0.19.2 # CoreML quantization +# tensorflow>=2.4.1 # TFLite export (or tensorflow-cpu, tensorflow-aarch64) +# tensorflowjs>=3.9.0 # TF.js export +# openvino-dev # OpenVINO export + +# Extras -------------------------------------- +# albumentations>=1.0.3 +# pycocotools>=2.0 # COCO mAP +# roboflow + +opencv-python>=4.1.1 +maturin \ No newline at end of file diff --git a/examples/python-dataflow/run.rs b/examples/python-dataflow/run.rs new file mode 100644 index 0000000000000000000000000000000000000000..a14b553f0144e14a51cfb9577b89b15f7dd574ab --- /dev/null +++ b/examples/python-dataflow/run.rs @@ -0,0 +1,102 @@ +use dora_core::{get_pip_path, get_python_path, run}; +use dora_download::download_file; +use dora_tracing::set_up_tracing; +use eyre::{bail, ContextCompat, WrapErr}; +use std::path::Path; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("python-dataflow-runner")?; + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set 
working dir")?; + + run( + get_python_path().context("Could not get python binary")?, + &["-m", "venv", "../.env"], + None, + ) + .await + .context("failed to create venv")?; + let venv = &root.join("examples").join(".env"); + std::env::set_var( + "VIRTUAL_ENV", + venv.to_str().context("venv path not valid unicode")?, + ); + let orig_path = std::env::var("PATH")?; + // bin folder is named Scripts on windows. + // 🤦‍♂️ See: https://github.com/pypa/virtualenv/commit/993ba1316a83b760370f5a3872b3f5ef4dd904c1 + let venv_bin = if cfg!(windows) { + venv.join("Scripts") + } else { + venv.join("bin") + }; + + if cfg!(windows) { + std::env::set_var( + "PATH", + format!( + "{};{orig_path}", + venv_bin.to_str().context("venv path not valid unicode")? + ), + ); + } else { + std::env::set_var( + "PATH", + format!( + "{}:{orig_path}", + venv_bin.to_str().context("venv path not valid unicode")? + ), + ); + } + + run( + get_python_path().context("Could not get pip binary")?, + &["-m", "pip", "install", "--upgrade", "pip"], + None, + ) + .await + .context("failed to install pip")?; + run( + get_pip_path().context("Could not get pip binary")?, + &["install", "-r", "requirements.txt"], + None, + ) + .await + .context("pip install failed")?; + + run( + "maturin", + &["develop"], + Some(&root.join("apis").join("python").join("node")), + ) + .await + .context("maturin develop failed")?; + download_file( + "https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt", + Path::new("yolov8n.pt"), + ) + .await + .context("Could not download weights.")?; + + let dataflow = Path::new("dataflow.yml"); + run_dataflow(dataflow).await?; + + Ok(()) +} + +async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--run-dataflow") + .arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} diff --git a/examples/python-dataflow/utils.py b/examples/python-dataflow/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dabc915e1935814866d5758911ad426ac9419ee1 --- /dev/null +++ b/examples/python-dataflow/utils.py @@ -0,0 +1,82 @@ +LABELS = [ + "ABC", + "bicycle", + "car", + "motorcycle", + "airplane", + "bus", + "train", + "truck", + "boat", + "traffic light", + "fire hydrant", + "stop sign", + "parking meter", + "bench", + "bird", + "cat", + "dog", + "horse", + "sheep", + "cow", + "elephant", + "bear", + "zebra", + "giraffe", + "backpack", + "umbrella", + "handbag", + "tie", + "suitcase", + "frisbee", + "skis", + "snowboard", + "sports ball", + "kite", + "baseball bat", + "baseball glove", + "skateboard", + "surfboard", + "tennis racket", + "bottle", + "wine glass", + "cup", + "fork", + "knife", + "spoon", + "bowl", + "banana", + "apple", + "sandwich", + "orange", + "broccoli", + "carrot", + "hot dog", + "pizza", + "donut", + "cake", + "chair", + "couch", + "potted plant", + "bed", + "dining table", + "toilet", + "tv", + "laptop", + "mouse", + "remote", + "keyboard", + "cell phone", + "microwave", + "oven", + "toaster", + "sink", + "refrigerator", + "book", + "clock", + "vase", + "scissors", + "teddy bear", + "hair drier", + "toothbrush", +] diff --git a/examples/python-dataflow/webcam.py b/examples/python-dataflow/webcam.py new file mode 100644 index 0000000000000000000000000000000000000000..00b47f27fa9a3631324590ad5abafe2a9b18ffaf --- /dev/null 
+++ b/examples/python-dataflow/webcam.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import os
+import time
+import numpy as np
+import cv2
+
+from dora import Node
+
+node = Node()
+
+CAMERA_INDEX = int(os.getenv("CAMERA_INDEX", 0))
+CAMERA_WIDTH = 640
+CAMERA_HEIGHT = 480
+video_capture = cv2.VideoCapture(CAMERA_INDEX)
+font = cv2.FONT_HERSHEY_SIMPLEX
+
+start = time.time()
+
+# Run for 10 seconds
+while time.time() - start < 10:
+    # Wait for the next event
+    event = node.next()
+    event_type = event["type"]
+    if event_type == "INPUT":
+        ret, frame = video_capture.read()
+        if not ret:
+            frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8)
+            cv2.putText(
+                frame,
+                "No Webcam was found at index %d" % (CAMERA_INDEX),
+                (int(30), int(30)),
+                font,
+                0.75,
+                (255, 255, 255),
+                2,
+                1,
+            )
+        node.send_output(
+            "image",
+            cv2.imencode(".jpg", frame)[1].tobytes(),
+            event["metadata"],
+        )
+    elif event_type == "STOP":
+        print("received stop")
+        break
+    else:
+        print("received unexpected event:", event_type)
+        break
+
+video_capture.release()
diff --git a/examples/python-operator-dataflow/.gitignore b/examples/python-operator-dataflow/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..eede66d83f98ba9915ece45358a792562b5aedab
--- /dev/null
+++ b/examples/python-operator-dataflow/.gitignore
@@ -0,0 +1 @@
+*.pt
\ No newline at end of file
diff --git a/examples/python-operator-dataflow/README.md b/examples/python-operator-dataflow/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..30b04e61c59a732eb8adf1a3f9ae1444fd627327
--- /dev/null
+++ b/examples/python-operator-dataflow/README.md
@@ -0,0 +1,58 @@
+# Python Dataflow Example
+
+This example shows how to create and connect dora operators and custom nodes in Python.
+
+## Overview
+
+The [`dataflow.yml`](./dataflow.yml) defines a simple dataflow graph with the following three nodes:
+
+- a webcam node that connects to your webcam and feeds the dataflow with webcam frames as JPEG-compressed byte arrays.
+- an object detection node that applies a YOLO model (`yolov8n` from the `ultralytics` package) to the webcam image. The output is, for each detected object, its bounding box, confidence, and class. More background on YOLO: https://pytorch.org/hub/ultralytics_yolov5/
+- a window plotting node that retrieves the webcam image and the bounding boxes and draws them together.
+
+## Getting started
+
+```bash
+pip install -r requirements.txt
+cargo run --example python-operator-dataflow
+```
+
+## Installation
+
+```bash
+conda create -n example_env python=3.11
+conda activate example_env
+pip install -r requirements.txt
+pip install -r requirements_llm.txt
+```
+
+## Run the dataflow
+
+- Start the object detection dataflow alone:
+
+```bash
+dora start dataflow.yml
+```
+
+- Start the LLM dataflow (only works on Windows and Linux):
+
+```bash
+dora start dataflow_llm.yml
+```
+
+Within the window, you can ask questions such as:
+
+```bash
+ask how are you
+change bounding box plot to red
+change confidence value to percentage
+change object detection to only detect person
+send 200 200 200 400 to topic line
+record
+```
+
+The keyboard, microphone, and whisper nodes work in a very similar fashion to the object detection dataflow, so feel free to explore them on your own.
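+
+All operators in this example share the same minimal shape: a class called `Operator` whose `on_event` method receives a dora event plus a `send_output` callback and returns a `DoraStatus`. The sketch below is illustrative only; the `count` output id is made up and does not appear in any dataflow of this example.
+
+```python
+import pyarrow as pa
+from dora import DoraStatus
+
+
+class Operator:
+    """Skeleton operator: counts its inputs and republishes the running count."""
+
+    def __init__(self):
+        self.count = 0
+
+    def on_event(self, dora_event, send_output) -> DoraStatus:
+        if dora_event["type"] == "INPUT":
+            self.count += 1
+            # emit the running count under a made-up `count` output id
+            send_output("count", pa.array([self.count]), dora_event["metadata"])
+        return DoraStatus.CONTINUE
+```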
+
+The code modification flow works by first comparing an instruction against a vector database of the operators' source code and then feeding the most similar operator, together with the instruction, to an LLM for code modification.
+
+The end result is then saved using a file saver.
diff --git a/examples/python-operator-dataflow/dataflow.yml b/examples/python-operator-dataflow/dataflow.yml
new file mode 100644
index 0000000000000000000000000000000000000000..400f881f51c302e913c722d742f116390b856029
--- /dev/null
+++ b/examples/python-operator-dataflow/dataflow.yml
@@ -0,0 +1,26 @@
+nodes:
+  - id: webcam
+    operator:
+      python: webcam.py
+      inputs:
+        tick: dora/timer/millis/50
+      outputs:
+        - image
+
+  - id: object_detection
+    operator:
+      send_stdout_as: stdout
+      python: object_detection.py
+      inputs:
+        image: webcam/image
+      outputs:
+        - bbox
+        - stdout
+
+  - id: plot
+    operator:
+      python: plot.py
+      inputs:
+        image: webcam/image
+        bbox: object_detection/bbox
+        assistant_message: object_detection/stdout
diff --git a/examples/python-operator-dataflow/dataflow_conda.yml b/examples/python-operator-dataflow/dataflow_conda.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ba367da22856a90eba6d46b4f5b7cab491a604c7
--- /dev/null
+++ b/examples/python-operator-dataflow/dataflow_conda.yml
@@ -0,0 +1,28 @@
+nodes:
+  - id: webcam
+    operator:
+      python: webcam.py
+      inputs:
+        tick: dora/timer/millis/50
+      outputs:
+        - image
+
+  - id: object_detection
+    operator:
+      send_stdout_as: stdout
+      python: object_detection.py
+      inputs:
+        image: webcam/image
+      outputs:
+        - bbox
+        - stdout
+
+  - id: plot
+    operator:
+      python:
+        source: plot.py
+        conda_env: base
+      inputs:
+        image: webcam/image
+        bbox: object_detection/bbox
+        assistant_message: object_detection/stdout
diff --git a/examples/python-operator-dataflow/dataflow_llm.yml b/examples/python-operator-dataflow/dataflow_llm.yml
new file mode 100644
index 0000000000000000000000000000000000000000..17f374177a82c8fedbdd9b00164612bcc1acefb8
--- /dev/null
+++ b/examples/python-operator-dataflow/dataflow_llm.yml
@@ -0,0 +1,100 @@
+nodes:
+  # - id: rerun
+  #   custom:
+  #     source: dora-rerun
+  #     inputs:
+  #
image: webcam/image + # boxes2d: object_detection/bbox + # textlog_llm: whisper/text + # # textlog_policy: whisper/text_policy + # envs: + # IMAGE_WIDTH: 1920 + # IMAGE_HEIGHT: 1080 + # IMAGE_DEPTH: 3 + # RERUN_MEMORY_LIMIT: 10% + - id: webcam + operator: + python: webcam.py + inputs: + tick: dora/timer/millis/50 + outputs: + - image + + - id: object_detection + operator: + python: object_detection.py + inputs: + image: webcam/image + outputs: + - bbox + + - id: plot + operator: + python: plot.py + inputs: + image: webcam/image + bbox: object_detection/bbox + line: llm/line + keyboard_buffer: keyboard/buffer + user_message: keyboard/submitted + assistant_message: llm/assistant_message + + ## Speech to text + - id: keyboard + custom: + source: keyboard_op.py + outputs: + - buffer + - submitted + - record + - ask + - send + - change + inputs: + recording: whisper/text + + - id: microphone + operator: + python: microphone_op.py + inputs: + record: keyboard/record + outputs: + - audio + + - id: whisper + operator: + python: whisper_op.py + inputs: + audio: microphone/audio + outputs: + - text + + ## Code Modifier + - id: vectordb + operator: + python: sentence_transformers_op.py + inputs: + query: keyboard/change + saved_file: file_saver/saved_file + outputs: + - raw_file + + - id: llm + operator: + python: llm_op.py + inputs: + code_modifier: vectordb/raw_file + assistant: keyboard/ask + message_sender: keyboard/send + outputs: + - modified_file + - line + - assistant_message + + - id: file_saver + operator: + python: file_saver_op.py + inputs: + file: llm/modified_file + outputs: + - saved_file diff --git a/examples/python-operator-dataflow/file_saver_op.py b/examples/python-operator-dataflow/file_saver_op.py new file mode 100644 index 0000000000000000000000000000000000000000..592e10f5a7f9e15cb231014c91a13a728fb36565 --- /dev/null +++ b/examples/python-operator-dataflow/file_saver_op.py @@ -0,0 +1,44 @@ +import pyarrow as pa + +from dora import DoraStatus + + +class Operator: + """ + Inferring object from images + """ + + def __init__(self): + self.last_file = "" + self.last_path = "" + self.last_netadata = None + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT" and dora_event["id"] == "file": + input = dora_event["value"][0].as_py() + + with open(input["path"], "r") as file: + self.last_file = file.read() + self.last_path = input["path"] + self.last_metadata = dora_event["metadata"] + with open(input["path"], "w") as file: + file.write(input["raw"]) + + send_output( + "saved_file", + pa.array( + [ + { + "raw": input["raw"], + "path": input["path"], + "origin": dora_event["id"], + } + ] + ), + dora_event["metadata"], + ) + return DoraStatus.CONTINUE diff --git a/examples/python-operator-dataflow/keyboard_op.py b/examples/python-operator-dataflow/keyboard_op.py new file mode 100644 index 0000000000000000000000000000000000000000..2d179ac6309bdbf1ff159ddbd9947167d2f133bb --- /dev/null +++ b/examples/python-operator-dataflow/keyboard_op.py @@ -0,0 +1,65 @@ +from pynput import keyboard +from pynput.keyboard import Key, Events +import pyarrow as pa +from dora import Node + + +node = Node() +buffer_text = "" +ctrl = False +submitted_text = [] +cursor = 0 + +NODE_TOPIC = ["record", "send", "ask", "change"] + +with keyboard.Events() as events: + while True: + dora_event = node.next(0.01) + if ( + dora_event is not None + and dora_event["type"] == "INPUT" + and dora_event["id"] == "recording" + ): + buffer_text += dora_event["value"][0].as_py() 
+ node.send_output("buffer", pa.array([buffer_text])) + continue + + event = events.get(1.0) + if event is not None and isinstance(event, Events.Press): + if hasattr(event.key, "char"): + cursor = 0 + buffer_text += event.key.char + node.send_output("buffer", pa.array([buffer_text])) + else: + if event.key == Key.backspace: + buffer_text = buffer_text[:-1] + node.send_output("buffer", pa.array([buffer_text])) + elif event.key == Key.esc: + buffer_text = "" + node.send_output("buffer", pa.array([buffer_text])) + elif event.key == Key.enter: + node.send_output("submitted", pa.array([buffer_text])) + first_word = buffer_text.split(" ")[0] + if first_word in NODE_TOPIC: + node.send_output(first_word, pa.array([buffer_text])) + submitted_text.append(buffer_text) + buffer_text = "" + node.send_output("buffer", pa.array([buffer_text])) + elif event.key == Key.ctrl: + ctrl = True + elif event.key == Key.space: + buffer_text += " " + node.send_output("buffer", pa.array([buffer_text])) + elif event.key == Key.up: + if len(submitted_text) > 0: + cursor = max(cursor - 1, -len(submitted_text)) + buffer_text = submitted_text[cursor] + node.send_output("buffer", pa.array([buffer_text])) + elif event.key == Key.down: + if len(submitted_text) > 0: + cursor = min(cursor + 1, 0) + buffer_text = submitted_text[cursor] + node.send_output("buffer", pa.array([buffer_text])) + elif event is not None and isinstance(event, Events.Release): + if event.key == Key.ctrl: + ctrl = False diff --git a/examples/python-operator-dataflow/llm_op.py b/examples/python-operator-dataflow/llm_op.py new file mode 100644 index 0000000000000000000000000000000000000000..23b9352510a2bb5e988148f3ff96cc5a034e3e25 --- /dev/null +++ b/examples/python-operator-dataflow/llm_op.py @@ -0,0 +1,314 @@ +from dora import DoraStatus +import pylcs +import os +import pyarrow as pa +from transformers import AutoModelForCausalLM, AutoTokenizer +import json + +import re +import time + +MODEL_NAME_OR_PATH = "/home/peiji/deepseek-coder-6.7B-instruct-GPTQ" +# MODEL_NAME_OR_PATH = "hanspeterlyngsoeraaschoujensen/deepseek-math-7b-instruct-GPTQ" + +CODE_MODIFIER_TEMPLATE = """ +### Instruction +Respond with the small modified code only. No explanation. + +```python +{code} +``` + +{user_message} + +### Response: +""" + + +MESSAGE_SENDER_TEMPLATE = """ +### Instruction +You're a json expert. Format your response as a json with a topic and a data field in a ```json block. No explanation needed. No code needed. +The schema for those json are: +- line: Int[4] + +The response should look like this: +```json + {{ "topic": "line", "data": [10, 10, 90, 10] }} +``` + +{user_message} + +### Response: +""" + +ASSISTANT_TEMPLATE = """ +### Instruction +You're a helpuf assistant named dora. +Reply with a short message. No code needed. + +User {user_message} + +### Response: +""" + + +model = AutoModelForCausalLM.from_pretrained( + MODEL_NAME_OR_PATH, + device_map="auto", + trust_remote_code=True, + revision="main", +) + + +tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True) + + +def extract_python_code_blocks(text): + """ + Extracts Python code blocks from the given text that are enclosed in triple backticks with a python language identifier. + + Parameters: + - text: A string that may contain one or more Python code blocks. + + Returns: + - A list of strings, where each string is a block of Python code extracted from the text. 
+ """ + pattern = r"```python\n(.*?)\n```" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + pattern = r"```python\n(.*?)(?:\n```|$)" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + return [text] + else: + matches = [remove_last_line(matches[0])] + + return matches + + +def extract_json_code_blocks(text): + """ + Extracts json code blocks from the given text that are enclosed in triple backticks with a json language identifier. + + Parameters: + - text: A string that may contain one or more json code blocks. + + Returns: + - A list of strings, where each string is a block of json code extracted from the text. + """ + pattern = r"```json\n(.*?)\n```" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + pattern = r"```json\n(.*?)(?:\n```|$)" + matches = re.findall(pattern, text, re.DOTALL) + if len(matches) == 0: + return [text] + + return matches + + +def remove_last_line(python_code): + """ + Removes the last line from a given string of Python code. + + Parameters: + - python_code: A string representing Python source code. + + Returns: + - A string with the last line removed. + """ + lines = python_code.split("\n") # Split the string into lines + if lines: # Check if there are any lines to remove + lines.pop() # Remove the last line + return "\n".join(lines) # Join the remaining lines back into a string + + +def calculate_similarity(source, target): + """ + Calculate a similarity score between the source and target strings. + This uses the edit distance relative to the length of the strings. + """ + edit_distance = pylcs.edit_distance(source, target) + max_length = max(len(source), len(target)) + # Normalize the score by the maximum possible edit distance (the length of the longer string) + similarity = 1 - (edit_distance / max_length) + return similarity + + +def find_best_match_location(source_code, target_block): + """ + Find the best match for the target_block within the source_code by searching line by line, + considering blocks of varying lengths. + """ + source_lines = source_code.split("\n") + target_lines = target_block.split("\n") + + best_similarity = 0 + best_start_index = 0 + best_end_index = -1 + + # Iterate over the source lines to find the best matching range for all lines in target_block + for start_index in range(len(source_lines) - len(target_lines) + 1): + for end_index in range(start_index + len(target_lines), len(source_lines) + 1): + current_window = "\n".join(source_lines[start_index:end_index]) + current_similarity = calculate_similarity(current_window, target_block) + if current_similarity > best_similarity: + best_similarity = current_similarity + best_start_index = start_index + best_end_index = end_index + + # Convert line indices back to character indices for replacement + char_start_index = len("\n".join(source_lines[:best_start_index])) + ( + 1 if best_start_index > 0 else 0 + ) + char_end_index = len("\n".join(source_lines[:best_end_index])) + + return char_start_index, char_end_index + + +def replace_code_in_source(source_code, replacement_block: str): + """ + Replace the best matching block in the source_code with the replacement_block, considering variable block lengths. 
+ """ + replacement_block = extract_python_code_blocks(replacement_block)[0] + start_index, end_index = find_best_match_location(source_code, replacement_block) + if start_index != -1 and end_index != -1: + # Replace the best matching part with the replacement block + new_source = ( + source_code[:start_index] + replacement_block + source_code[end_index:] + ) + return new_source + else: + return source_code + + +class Operator: + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT" and dora_event["id"] == "code_modifier": + input = dora_event["value"][0].as_py() + + with open(input["path"], "r", encoding="utf8") as f: + code = f.read() + + user_message = input["user_message"] + start_llm = time.time() + output = self.ask_llm( + CODE_MODIFIER_TEMPLATE.format(code=code, user_message=user_message) + ) + + source_code = replace_code_in_source(code, output) + print("response time:", time.time() - start_llm, flush=True) + send_output( + "modified_file", + pa.array( + [ + { + "raw": source_code, + "path": input["path"], + "response": output, + "prompt": input["user_message"], + } + ] + ), + dora_event["metadata"], + ) + print("response: ", output, flush=True) + send_output( + "assistant_message", + pa.array([output]), + dora_event["metadata"], + ) + elif dora_event["type"] == "INPUT" and dora_event["id"] == "message_sender": + user_message = dora_event["value"][0].as_py() + output = self.ask_llm( + MESSAGE_SENDER_TEMPLATE.format(user_message=user_message) + ) + outputs = extract_json_code_blocks(output)[0] + try: + output = json.loads(outputs) + if not isinstance(output["data"], list): + output["data"] = [output["data"]] + + if output["topic"] in [ + "line", + ]: + send_output( + output["topic"], + pa.array(output["data"]), + dora_event["metadata"], + ) + else: + print("Could not find the topic: {}".format(output["topic"])) + except: + print("Could not parse json") + # if data is not iterable, put data in a list + elif dora_event["type"] == "INPUT" and dora_event["id"] == "assistant": + user_message = dora_event["value"][0].as_py() + output = self.ask_llm(ASSISTANT_TEMPLATE.format(user_message=user_message)) + send_output( + "assistant_message", + pa.array([output]), + dora_event["metadata"], + ) + return DoraStatus.CONTINUE + + def ask_llm(self, prompt): + + # Generate output + # prompt = PROMPT_TEMPLATE.format(system_message=system_message, prompt=prompt)) + input = tokenizer(prompt, return_tensors="pt") + input_ids = input.input_ids.cuda() + + # add attention mask here + attention_mask = input["attention_mask"].cuda() + + output = model.generate( + inputs=input_ids, + temperature=0.7, + do_sample=True, + top_p=0.95, + top_k=40, + max_new_tokens=512, + attention_mask=attention_mask, + eos_token_id=tokenizer.eos_token_id, + ) + # Get the tokens from the output, decode them, print them + + # Get text between im_start and im_end + return tokenizer.decode(output[0], skip_special_tokens=True)[len(prompt) :] + + +if __name__ == "__main__": + op = Operator() + + # Path to the current file + current_file_path = __file__ + + # Directory of the current file + current_directory = os.path.dirname(current_file_path) + + path = current_directory + "object_detection.py" + with open(path, "r", encoding="utf8") as f: + raw = f.read() + + op.on_event( + { + "type": "INPUT", + "id": "message_sender", + "value": pa.array( + [ + { + "path": path, + "user_message": "send a star ", + }, + ] + ), + "metadata": [], + }, + print, + ) diff --git 
a/examples/python-operator-dataflow/microphone_op.py b/examples/python-operator-dataflow/microphone_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6fb6e63ac70e691dd9f316033bb55ea45b0ca42
--- /dev/null
+++ b/examples/python-operator-dataflow/microphone_op.py
@@ -0,0 +1,34 @@
+import numpy as np
+import pyarrow as pa
+import sounddevice as sd
+
+from dora import DoraStatus
+
+# Set the parameters for recording
+SAMPLE_RATE = 16000
+MAX_DURATION = 5
+
+
+class Operator:
+    """
+    Microphone operator that records the audio
+    """
+
+    def on_event(
+        self,
+        dora_event,
+        send_output,
+    ) -> DoraStatus:
+        if dora_event["type"] == "INPUT":
+            audio_data = sd.rec(
+                int(SAMPLE_RATE * MAX_DURATION),
+                samplerate=SAMPLE_RATE,
+                channels=1,
+                dtype=np.int16,
+                blocking=True,
+            )
+
+            audio_data = audio_data.ravel().astype(np.float32) / 32768.0
+            if len(audio_data) > 0:
+                send_output("audio", pa.array(audio_data), dora_event["metadata"])
+        return DoraStatus.CONTINUE
diff --git a/examples/python-operator-dataflow/object_detection.py b/examples/python-operator-dataflow/object_detection.py
new file mode 100644
index 0000000000000000000000000000000000000000..91384aa55f605c1ac439547d7cc5e4cb36d29eb0
--- /dev/null
+++ b/examples/python-operator-dataflow/object_detection.py
@@ -0,0 +1,41 @@
+import numpy as np
+import pyarrow as pa
+import sys
+print(sys.version)
+from dora import DoraStatus
+from ultralytics import YOLO
+
+
+CAMERA_WIDTH = 640
+CAMERA_HEIGHT = 480
+
+
+model = YOLO("/home/peiji/yolov8n.pt")
+
+
+class Operator:
+    """
+    Inferring object from images
+    """
+
+    def on_event(
+        self,
+        dora_event,
+        send_output,
+    ) -> DoraStatus:
+        if dora_event["type"] == "INPUT":
+            frame = (
+                dora_event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
+            )
+            frame = frame[:, :, ::-1]  # OpenCV image (BGR to RGB)
+            results = model(frame, verbose=False)  # includes NMS
+            # Process results
+            boxes = np.array(results[0].boxes.xyxy.cpu())
+            conf = np.array(results[0].boxes.conf.cpu())
+            label = np.array(results[0].boxes.cls.cpu())
+            # concatenate them together
+            arrays = np.concatenate((boxes, conf[:, None], label[:, None]), axis=1)
+
+            send_output("bbox", pa.array(arrays.ravel()), dora_event["metadata"])
+
+        return DoraStatus.CONTINUE
diff --git a/examples/python-operator-dataflow/out/019033f7-83ed-7ae3-94ed-2126572f675c/log_microphone.txt b/examples/python-operator-dataflow/out/019033f7-83ed-7ae3-94ed-2126572f675c/log_microphone.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f7-83ed-7ae3-94ed-2126572f675c/log_object_detection.txt b/examples/python-operator-dataflow/out/019033f7-83ed-7ae3-94ed-2126572f675c/log_object_detection.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f7-83ed-7ae3-94ed-2126572f675c/log_plot.txt b/examples/python-operator-dataflow/out/019033f7-83ed-7ae3-94ed-2126572f675c/log_plot.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f7-83ed-7ae3-94ed-2126572f675c/log_vectordb.txt b/examples/python-operator-dataflow/out/019033f7-83ed-7ae3-94ed-2126572f675c/log_vectordb.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f7-83ed-7ae3-94ed-2126572f675c/log_webcam.txt b/examples/python-operator-dataflow/out/019033f7-83ed-7ae3-94ed-2126572f675c/log_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f7-83ed-7ae3-94ed-2126572f675c/log_whisper.txt b/examples/python-operator-dataflow/out/019033f7-83ed-7ae3-94ed-2126572f675c/log_whisper.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_file_saver.txt b/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_file_saver.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_keyboard.txt b/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_keyboard.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_llm.txt b/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_llm.txt new file mode 100644 index 0000000000000000000000000000000000000000..e794d2387759a64d4665eedfafbd2ee0cece21e2 --- /dev/null +++ b/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_llm.txt @@ -0,0 +1,10 @@ +CUDA extension not installed. +CUDA extension not installed. +/home/peiji/anaconda3/envs/cu122/lib/python3.10/site-packages/transformers/modeling_utils.py:4371: FutureWarning: `_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. Please use `model.hf_quantizer.is_trainable` instead + warnings.warn( +The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class +The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. 
Use the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class +Some weights of the model checkpoint at /home/peiji/deepseek-coder-6.7B-instruct-GPTQ were not used when initializing LlamaForCausalLM: ['model.layers.0.mlp.down_proj.bias', ..., 'model.layers.9.self_attn.v_proj.bias'] +- This IS expected if you are initializing LlamaForCausalLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). +- This IS NOT expected if you are initializing LlamaForCausalLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
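The `log_llm.txt` output above is the normal result of loading the GPTQ-quantized `deepseek-coder-6.7B-instruct-GPTQ` checkpoint into `LlamaForCausalLM`: most likely the quantized checkpoint stores per-layer attention/MLP bias tensors that the Llama layers in this configuration do not use, so they are reported as unused, which is exactly the "expected" case the warning itself describes. For orientation only, the sketch below shows the kind of load-and-generate call that produces these messages when `optimum` and `auto-gptq` (listed in `requirements_llm.txt`) are installed; the model path is copied from the log, while everything else is an illustrative assumption rather than the actual `llm_op.py` code from this PR.

```python
# Rough, hypothetical sketch of loading a GPTQ LLaMA-style checkpoint with
# transformers; this is NOT the llm_op.py from this diff, only an illustration
# of the call that triggers the warnings recorded in log_llm.txt.
# Assumes `optimum` and `auto-gptq` are installed (see requirements_llm.txt).
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_PATH = "/home/peiji/deepseek-coder-6.7B-instruct-GPTQ"  # path taken from the log

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    device_map="auto",  # needs `accelerate`; places the quantized layers on the GPU
)

prompt = "Write a function that returns the n-th Fibonacci number."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```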
diff --git a/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_microphone.txt b/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_microphone.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_object_detection.txt b/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_object_detection.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_plot.txt b/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_plot.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_vectordb.txt b/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_vectordb.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_webcam.txt b/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_whisper.txt b/examples/python-operator-dataflow/out/019033f9-854f-70a9-a648-460ab5d4ade6/log_whisper.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/python-operator-dataflow/plot.py b/examples/python-operator-dataflow/plot.py new file mode 100644 index 0000000000000000000000000000000000000000..c7b0a0bedd5f05f105c99d5da56aeee24330c7fe --- /dev/null +++ b/examples/python-operator-dataflow/plot.py @@ -0,0 +1,118 @@ +import os +import cv2 +import time + +from dora import DoraStatus +from utils import LABELS + + +CI = os.environ.get("CI") + +CAMERA_WIDTH = 640 +CAMERA_HEIGHT = 480 + +FONT = cv2.FONT_HERSHEY_SIMPLEX + + +class Operator: + """ + Plot image and bounding box + """ + + def __init__(self): + self.bboxs = [] + self.buffer = "" + self.submitted = [] + self.lines = [] + + def on_event( + self, + dora_event, + send_output, + ): + if dora_event["type"] == "INPUT": + id = dora_event["id"] + value = dora_event["value"] + if id == "image": + + image = ( + value.to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)).copy() + ) + + for bbox in self.bboxs: + [ + min_x, + min_y, + max_x, + max_y, + confidence, + label, + ] = bbox + cv2.rectangle( + image, + (int(min_x), int(min_y)), + (int(max_x), int(max_y)), + (0, 255, 0), + ) + cv2.putText( + image, + f"{LABELS[int(label)]}, {confidence:0.2f}", + (int(max_x), int(max_y)), + FONT, + 0.5, + (0, 255, 0), + ) + + cv2.putText( + image, self.buffer, (20, 14 + 21 * 14), FONT, 0.5, (190, 250, 0), 1 + ) + + i = 0 + for text in self.submitted[::-1]: + color = ( + (0, 255, 190) + if text["role"] == "user_message" + else (0, 190, 255) + ) + cv2.putText( + image, + text["content"], + ( + 20, + 14 + (19 - i) * 14, + ), + FONT, + 0.5, + color, + 1, + ) + i += 1 + + for line in self.lines: + cv2.line( + image, + (int(line[0]), int(line[1])), + (int(line[2]), 
int(line[3])), + (0, 0, 255), + 2, + ) + + if CI != "true": + cv2.imshow("frame", image) + if cv2.waitKey(1) & 0xFF == ord("q"): + return DoraStatus.STOP + elif id == "bbox": + self.bboxs = value.to_numpy().reshape((-1, 6)) + elif id == "keyboard_buffer": + self.buffer = value[0].as_py() + elif id == "line": + self.lines += [value.to_pylist()] + elif "message" in id: + self.submitted += [ + { + "role": id, + "content": value[0].as_py(), + } + ] + + return DoraStatus.CONTINUE diff --git a/examples/python-operator-dataflow/requirements.txt b/examples/python-operator-dataflow/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..68020faa6a97cfdf90e87c88ce76b9eded4e6fb0 --- /dev/null +++ b/examples/python-operator-dataflow/requirements.txt @@ -0,0 +1,47 @@ +# YOLOv5 requirements +# Usage: pip install -r requirements.txt + +# Base ---------------------------------------- +ultralytics +gitpython +ipython # interactive notebook +matplotlib>=3.2.2 +numpy>=1.18.5 +opencv-python>=4.1.1 +Pillow>=7.1.2 +psutil # system resources +PyYAML>=5.3.1 +requests>=2.23.0 +scipy>=1.4.1 +thop>=0.1.1 # FLOPs computation +torch # see https://pytorch.org/get-started/locally (recommended) +torchvision +tqdm>=4.64.0 + +# Logging ------------------------------------- +tensorboard>=2.4.1 +# wandb +# clearml + +# Plotting ------------------------------------ +pandas>=1.1.4 +seaborn>=0.11.0 + +# Export -------------------------------------- +# coremltools>=5.2 # CoreML export +# onnx>=1.9.0 # ONNX export +# onnx-simplifier>=0.4.1 # ONNX simplifier +# nvidia-pyindex # TensorRT export +# nvidia-tensorrt # TensorRT export +# scikit-learn==0.19.2 # CoreML quantization +# tensorflow>=2.4.1 # TFLite export (or tensorflow-cpu, tensorflow-aarch64) +# tensorflowjs>=3.9.0 # TF.js export +# openvino-dev # OpenVINO export + +# Extras -------------------------------------- +# albumentations>=1.0.3 +# pycocotools>=2.0 # COCO mAP +# roboflow + +opencv-python>=4.1.1 +maturin diff --git a/examples/python-operator-dataflow/requirements_llm.txt b/examples/python-operator-dataflow/requirements_llm.txt new file mode 100644 index 0000000000000000000000000000000000000000..4cc16fa36d60498c3e9910888cf84ae6d00b26d3 --- /dev/null +++ b/examples/python-operator-dataflow/requirements_llm.txt @@ -0,0 +1,9 @@ +openai-whisper +sounddevice +pynput +sentence-transformers +transformers +pylcs +accelerate +optimum +auto-gptq>=0.7.1 diff --git a/examples/python-operator-dataflow/run.rs b/examples/python-operator-dataflow/run.rs new file mode 100644 index 0000000000000000000000000000000000000000..d6534cf6ba24f939b1c69119aed89445b10b1857 --- /dev/null +++ b/examples/python-operator-dataflow/run.rs @@ -0,0 +1,100 @@ +use dora_core::{get_pip_path, get_python_path, run}; +use dora_tracing::set_up_tracing; +use eyre::{bail, ContextCompat, WrapErr}; +use std::path::Path; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("python-operator-dataflow-runner")?; + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + run( + get_python_path().context("Could not get python binary")?, + &["-m", "venv", "../.env"], + None, + ) + .await + .context("failed to create venv")?; + let venv = &root.join("examples").join(".env"); + std::env::set_var( + "VIRTUAL_ENV", + venv.to_str().context("venv path not valid unicode")?, + ); + let orig_path = std::env::var("PATH")?; + // bin folder is named Scripts on windows. 
+ // 🤦‍♂️ See: https://github.com/pypa/virtualenv/commit/993ba1316a83b760370f5a3872b3f5ef4dd904c1 + let venv_bin = if cfg!(windows) { + venv.join("Scripts") + } else { + venv.join("bin") + }; + + if cfg!(windows) { + std::env::set_var( + "PATH", + format!( + "{};{orig_path}", + venv_bin.to_str().context("venv path not valid unicode")? + ), + ); + } else { + std::env::set_var( + "PATH", + format!( + "{}:{orig_path}", + venv_bin.to_str().context("venv path not valid unicode")? + ), + ); + } + + run( + get_python_path().context("Could not get pip binary")?, + &["-m", "pip", "install", "--upgrade", "pip"], + None, + ) + .await + .context("failed to install pip")?; + run( + get_pip_path().context("Could not get pip binary")?, + &["install", "-r", "requirements.txt"], + None, + ) + .await + .context("pip install failed")?; + + run( + "maturin", + &["develop"], + Some(&root.join("apis").join("python").join("node")), + ) + .await + .context("maturin develop failed")?; + + if std::env::var("CONDA_EXE").is_ok() { + let dataflow = Path::new("dataflow.yml"); + run_dataflow(dataflow).await?; + } else { + let dataflow = Path::new("dataflow_conda.yml"); + run_dataflow(dataflow).await?; + } + + Ok(()) +} + +async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--run-dataflow") + .arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} diff --git a/examples/python-operator-dataflow/sentence_transformers_op.py b/examples/python-operator-dataflow/sentence_transformers_op.py new file mode 100644 index 0000000000000000000000000000000000000000..674df6367d3938ab70aa53cf791005ff2e59e0ee --- /dev/null +++ b/examples/python-operator-dataflow/sentence_transformers_op.py @@ -0,0 +1,92 @@ +from sentence_transformers import SentenceTransformer +from sentence_transformers import util + +from dora import DoraStatus +import os +import sys +import torch +import pyarrow as pa + +SHOULD_BE_INCLUDED = [ + "webcam.py", + "object_detection.py", + "plot.py", +] + + +## Get all python files path in given directory +def get_all_functions(path): + raw = [] + paths = [] + for root, dirs, files in os.walk(path): + for file in files: + if file.endswith(".py"): + if file not in SHOULD_BE_INCLUDED: + continue + path = os.path.join(root, file) + with open(path, "r", encoding="utf8") as f: + ## add file folder to system path + sys.path.append(root) + ## import module from path + raw.append(f.read()) + paths.append(path) + + return raw, paths + + +def search(query_embedding, corpus_embeddings, paths, raw, k=5, file_extension=None): + cos_scores = util.cos_sim(query_embedding, corpus_embeddings)[0] + top_results = torch.topk(cos_scores, k=min(k, len(cos_scores)), sorted=True) + out = [] + for score, idx in zip(top_results[0], top_results[1]): + out.extend([raw[idx], paths[idx], score]) + return out + + +class Operator: + """ """ + + def __init__(self): + ## TODO: Add a initialisation step + self.model = SentenceTransformer("/home/peiji/bge-large-en-v1.5/") + self.encoding = [] + # file directory + path = os.path.dirname(os.path.abspath(__file__)) + + self.raw, self.path = get_all_functions(path) + # Encode all files + self.encoding = self.model.encode(self.raw) + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + if 
dora_event["id"] == "query": + values = dora_event["value"].to_pylist() + + query_embeddings = self.model.encode(values) + output = search( + query_embeddings, + self.encoding, + self.path, + self.raw, + ) + [raw, path, score] = output[0:3] + send_output( + "raw_file", + pa.array([{"raw": raw, "path": path, "user_message": values[0]}]), + dora_event["metadata"], + ) + else: + input = dora_event["value"][0].as_py() + index = self.path.index(input["path"]) + self.raw[index] = input["raw"] + self.encoding[index] = self.model.encode([input["raw"]])[0] + + return DoraStatus.CONTINUE + + +if __name__ == "__main__": + operator = Operator() diff --git a/examples/python-operator-dataflow/utils.py b/examples/python-operator-dataflow/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a40bd6d6f76e01d1ae278647ffea07acec84f898 --- /dev/null +++ b/examples/python-operator-dataflow/utils.py @@ -0,0 +1,82 @@ +LABELS = [ + "person", + "bicycle", + "car", + "motorcycle", + "airplane", + "bus", + "train", + "truck", + "boat", + "traffic light", + "fire hydrant", + "stop sign", + "parking meter", + "bench", + "bird", + "cat", + "dog", + "horse", + "sheep", + "cow", + "elephant", + "bear", + "zebra", + "giraffe", + "backpack", + "umbrella", + "handbag", + "tie", + "suitcase", + "frisbee", + "skis", + "snowboard", + "sports ball", + "kite", + "baseball bat", + "baseball glove", + "skateboard", + "surfboard", + "tennis racket", + "bottle", + "wine glass", + "cup", + "fork", + "knife", + "spoon", + "bowl", + "banana", + "apple", + "sandwich", + "orange", + "broccoli", + "carrot", + "hot dog", + "pizza", + "donut", + "cake", + "chair", + "couch", + "potted plant", + "bed", + "dining table", + "toilet", + "tv", + "laptop", + "mouse", + "remote", + "keyboard", + "cell phone", + "microwave", + "oven", + "toaster", + "sink", + "refrigerator", + "book", + "clock", + "vase", + "scissors", + "teddy bear", + "hair drier", + "toothbrush", +] diff --git a/examples/python-operator-dataflow/webcam.py b/examples/python-operator-dataflow/webcam.py new file mode 100644 index 0000000000000000000000000000000000000000..43ce7e207a8d4df6c410c19560eaa1fe7ab4b42b --- /dev/null +++ b/examples/python-operator-dataflow/webcam.py @@ -0,0 +1,75 @@ +import os +import time + +import cv2 +import numpy as np +import pyarrow as pa + +from dora import DoraStatus + +CAMERA_WIDTH = 640 +CAMERA_HEIGHT = 480 +CAMERA_INDEX = int(os.getenv("CAMERA_INDEX", 0)) +CI = os.environ.get("CI") + +font = cv2.FONT_HERSHEY_SIMPLEX + + +class Operator: + """ + Sending image from webcam to the dataflow + """ + + def __init__(self): + self.video_capture = cv2.VideoCapture(CAMERA_INDEX) + self.start_time = time.time() + self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH) + self.video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT) + self.failure_count = 0 + + def on_event( + self, + dora_event: str, + send_output, + ) -> DoraStatus: + event_type = dora_event["type"] + if event_type == "INPUT": + ret, frame = self.video_capture.read() + if ret: + frame = cv2.resize(frame, (CAMERA_WIDTH, CAMERA_HEIGHT)) + self.failure_count = 0 + ## Push an error image in case the camera is not available. 
+ else: + if self.failure_count > 10: + frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8) + cv2.putText( + frame, + "No Webcam was found at index %d" % (CAMERA_INDEX), + (int(30), int(30)), + font, + 0.75, + (255, 255, 255), + 2, + 1, + ) + else: + self.failure_count += 1 + return DoraStatus.CONTINUE + + send_output( + "image", + pa.array(frame.ravel()), + dora_event["metadata"], + ) + elif event_type == "STOP": + print("received stop") + else: + print("received unexpected event:", event_type) + + if time.time() - self.start_time < 20 or CI != "true": + return DoraStatus.CONTINUE + else: + return DoraStatus.STOP + + def __del__(self): + self.video_capture.release() diff --git a/examples/python-operator-dataflow/whisper_op.py b/examples/python-operator-dataflow/whisper_op.py new file mode 100644 index 0000000000000000000000000000000000000000..feab8b92e8d8018fe7901eaa5eee9f81f6c0817d --- /dev/null +++ b/examples/python-operator-dataflow/whisper_op.py @@ -0,0 +1,25 @@ +import pyarrow as pa +import whisper + +from dora import DoraStatus + + +model = whisper.load_model("base") + + +class Operator: + """ + Transforming Speech to Text using OpenAI Whisper model + """ + + def on_event( + self, + dora_event, + send_output, + ) -> DoraStatus: + if dora_event["type"] == "INPUT": + audio = dora_event["value"].to_numpy() + audio = whisper.pad_or_trim(audio) + result = model.transcribe(audio, language="en") + send_output("text", pa.array([result["text"]]), dora_event["metadata"]) + return DoraStatus.CONTINUE diff --git a/examples/python-operator-dataflow/yolov8n.pt b/examples/python-operator-dataflow/yolov8n.pt new file mode 100644 index 0000000000000000000000000000000000000000..5d0becea028c1952ecc77c608b46b246e8254c88 --- /dev/null +++ b/examples/python-operator-dataflow/yolov8n.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31e20dde3def09e2cf938c7be6fe23d9150bbbe503982af13345706515f2ef95 +size 6534387 diff --git a/examples/python-ros2-dataflow/control_node.py b/examples/python-ros2-dataflow/control_node.py new file mode 100644 index 0000000000000000000000000000000000000000..7540c0923996baf69ada33feebcf74f347785d0d --- /dev/null +++ b/examples/python-ros2-dataflow/control_node.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import random +from dora import Node +import pyarrow as pa + +node = Node() + +for i in range(500): + event = node.next() + if event is None: + break + if event["type"] == "INPUT": + event_id = event["id"] + if event_id == "turtle_pose": + print( + f"""Pose: {event["value"].tolist()}""".replace("\r", "").replace( + "\n", " " + ) + ) + elif event_id == "tick": + direction = { + "linear": { + "x": 1.0 + random.random(), + }, + "angular": {"z": (random.random() - 0.5) * 5}, + } + + node.send_output( + "direction", + pa.array([direction]), + ) diff --git a/examples/python-ros2-dataflow/dataflow.yml b/examples/python-ros2-dataflow/dataflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..809c8d950a68cdec54671ba82d8cf8773fc21909 --- /dev/null +++ b/examples/python-ros2-dataflow/dataflow.yml @@ -0,0 +1,17 @@ +nodes: + - id: turtle + custom: + source: ./random_turtle.py + inputs: + direction: control/direction + outputs: + - turtle_pose + + - id: control + custom: + source: ./control_node.py + inputs: + turtle_pose: turtle/turtle_pose + tick: dora/timer/millis/500 + outputs: + - direction diff --git a/examples/python-ros2-dataflow/random_turtle.py b/examples/python-ros2-dataflow/random_turtle.py new 
file mode 100644 index 0000000000000000000000000000000000000000..1e690d0791ae3a5b329845bd85538049f6a35a42 --- /dev/null +++ b/examples/python-ros2-dataflow/random_turtle.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from dora import Node, Ros2Context, Ros2NodeOptions, Ros2QosPolicies + +CHECK_TICK = 50 + +# Create a ROS2 Context +ros2_context = Ros2Context() +ros2_node = ros2_context.new_node( + "turtle_teleop", + "/ros2_demo", + Ros2NodeOptions(rosout=True), +) + +# Define a ROS2 QOS +topic_qos = Ros2QosPolicies(reliable=True, max_blocking_time=0.1) + +# Create a publisher to cmd_vel topic +turtle_twist_topic = ros2_node.create_topic( + "/turtle1/cmd_vel", "geometry_msgs/Twist", topic_qos +) +twist_writer = ros2_node.create_publisher(turtle_twist_topic) + +# Create a listener to pose topic +turtle_pose_topic = ros2_node.create_topic("/turtle1/pose", "turtlesim/Pose", topic_qos) +pose_reader = ros2_node.create_subscription(turtle_pose_topic) + +# Create a dora node +dora_node = Node() + +# Listen for both stream on the same loop as Python does not handle well multiprocessing +dora_node.merge_external_events(pose_reader) + +print("looping", flush=True) + +# take track of minimum and maximum coordinates of turtle +min_x = 1000 +max_x = 0 +min_y = 1000 +max_y = 0 + +for i in range(500): + event = dora_node.next() + if event is None: + break + event_kind = event["kind"] + # Dora event + if event_kind == "dora": + event_type = event["type"] + if event_type == "INPUT": + event_id = event["id"] + if event_id == "direction": + twist_writer.publish(event["value"]) + + # ROS2 Event + elif event_kind == "external": + pose = event.inner()[0].as_py() + min_x = min([min_x, pose["x"]]) + max_x = max([max_x, pose["x"]]) + min_y = min([min_y, pose["y"]]) + max_y = max([max_y, pose["y"]]) + dora_node.send_output("turtle_pose", event.inner()) + +assert max_x - min_x > 1 or max_y - min_y > 1, "no turtle movement" diff --git a/examples/python-ros2-dataflow/requirements.txt b/examples/python-ros2-dataflow/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..aa0c5e6247f2c6348b17e604c953bca3623e8b96 --- /dev/null +++ b/examples/python-ros2-dataflow/requirements.txt @@ -0,0 +1,2 @@ +pyarrow +maturin diff --git a/examples/python-ros2-dataflow/run.rs b/examples/python-ros2-dataflow/run.rs new file mode 100644 index 0000000000000000000000000000000000000000..8eecdf6495e5aa8a6c8b052bde4bf4d6d1764be4 --- /dev/null +++ b/examples/python-ros2-dataflow/run.rs @@ -0,0 +1,95 @@ +use dora_core::{get_pip_path, get_python_path, run}; +use dora_tracing::set_up_tracing; +use eyre::{bail, ContextCompat, WrapErr}; +use std::path::Path; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("python-ros2-dataflow-runner")?; + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + run( + get_python_path().context("Could not get python binary")?, + &["-m", "venv", "../.env"], + None, + ) + .await + .context("failed to create venv")?; + let venv = &root.join("examples").join(".env"); + std::env::set_var( + "VIRTUAL_ENV", + venv.to_str().context("venv path not valid unicode")?, + ); + let orig_path = std::env::var("PATH")?; + // bin folder is named Scripts on windows. 
+ // 🤦‍♂️ See: https://github.com/pypa/virtualenv/commit/993ba1316a83b760370f5a3872b3f5ef4dd904c1 + let venv_bin = if cfg!(windows) { + venv.join("Scripts") + } else { + venv.join("bin") + }; + + if cfg!(windows) { + std::env::set_var( + "PATH", + format!( + "{};{orig_path}", + venv_bin.to_str().context("venv path not valid unicode")? + ), + ); + } else { + std::env::set_var( + "PATH", + format!( + "{}:{orig_path}", + venv_bin.to_str().context("venv path not valid unicode")? + ), + ); + } + + run( + get_python_path().context("Could not get pip binary")?, + &["-m", "pip", "install", "--upgrade", "pip"], + None, + ) + .await + .context("failed to install pip")?; + run( + get_pip_path().context("Could not get pip binary")?, + &["install", "-r", "requirements.txt"], + None, + ) + .await + .context("pip install failed")?; + + run( + "maturin", + &["develop"], + Some(&root.join("apis").join("python").join("node")), + ) + .await + .context("maturin develop failed")?; + + let dataflow = Path::new("dataflow.yml"); + run_dataflow(dataflow).await?; + + Ok(()) +} + +async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--run-dataflow") + .arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} diff --git a/examples/rerun-viewer/README.md b/examples/rerun-viewer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0f481bf60151d89bbae18361cbe4e77992bad6d9 --- /dev/null +++ b/examples/rerun-viewer/README.md @@ -0,0 +1,25 @@ +# Python Dataflow Example + +This example shows how to create and connect dora to rerun. + +This node is still experimental, and the format for passing images, bounding boxes, and text will probably change in the future. + +## Getting Started + +```bash +cargo install --force rerun-cli@0.15.1 + +## To install this package +git clone git@github.com:dora-rs/dora.git +cargo install --git https://github.com/dora-rs/dora dora-rerun + +dora start dataflow.yml --attach +``` + +You will see two visualizations: one from matplotlib and one from rerun, for comparison. + +## CI/CD + +This example is not tested in CI/CD, as visualization is not easily testable.
+ +Please reach out in case of issues at: https://github.com/dora-rs/dora/issues diff --git a/examples/rerun-viewer/dataflow.yml b/examples/rerun-viewer/dataflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..b02526c62dfdad4b3413f8db7960e92f51d52135 --- /dev/null +++ b/examples/rerun-viewer/dataflow.yml @@ -0,0 +1,48 @@ +nodes: + - id: webcam + custom: + source: ./webcam.py + inputs: + tick: + source: dora/timer/millis/10 + queue_size: 1000 + outputs: + - image + - text + envs: + IMAGE_WIDTH: 960 + IMAGE_HEIGHT: 540 + + + - id: object_detection + custom: + source: ./object_detection.py + inputs: + image: webcam/image + outputs: + - bbox + envs: + IMAGE_WIDTH: 960 + IMAGE_HEIGHT: 540 + + - id: rerun + custom: + source: dora-rerun + inputs: + image: webcam/image + text: webcam/text + boxes2d: object_detection/bbox + envs: + IMAGE_WIDTH: 960 + IMAGE_HEIGHT: 540 + IMAGE_DEPTH: 3 + + # - id: matplotlib + # custom: + # source: ./plot.py + # inputs: + # image: webcam/image + # # bbox: object_detection/bbox + # envs: + # IMAGE_WIDTH: 960 + # IMAGE_HEIGHT: 540 \ No newline at end of file diff --git a/examples/rerun-viewer/object_detection.py b/examples/rerun-viewer/object_detection.py new file mode 100644 index 0000000000000000000000000000000000000000..8b2e155899364d643273002ff28c4985c90f9b8a --- /dev/null +++ b/examples/rerun-viewer/object_detection.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import os +import cv2 +import numpy as np +from ultralytics import YOLO + +from dora import Node +import pyarrow as pa + +model = YOLO("/home/peiji/yolov8n.pt") + +node = Node() + +IMAGE_WIDTH = int(os.getenv("IMAGE_WIDTH", 960)) +IMAGE_HEIGHT = int(os.getenv("IMAGE_HEIGHT", 540)) + +for event in node: + event_type = event["type"] + if event_type == "INPUT": + event_id = event["id"] + if event_id == "image": + print("[object detection] received image input") + image = event["value"].to_numpy().reshape((IMAGE_HEIGHT, IMAGE_WIDTH, 3)) + + frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + frame = frame[:, :, ::-1] # OpenCV image (BGR to RGB) + results = model(frame) # includes NMS + # Process results + boxes = np.array(results[0].boxes.xywh.cpu()) + conf = np.array(results[0].boxes.conf.cpu()) + label = np.array(results[0].boxes.cls.cpu()) + # concatenate them together + arrays = np.concatenate((boxes, conf[:, None], label[:, None]), axis=1) + + node.send_output("bbox", pa.array(arrays.ravel()), event["metadata"]) + else: + print("[object detection] ignoring unexpected input:", event_id) + elif event_type == "STOP": + print("[object detection] received stop") + elif event_type == "ERROR": + print("[object detection] error: ", event["error"]) + else: + print("[object detection] received unexpected event:", event_type) diff --git a/examples/rerun-viewer/out/01902e90-8d59-7d3f-9203-875f47544be4/log_matplotlib.txt b/examples/rerun-viewer/out/01902e90-8d59-7d3f-9203-875f47544be4/log_matplotlib.txt new file mode 100644 index 0000000000000000000000000000000000000000..b18938a318f1e7744bdbd3e412677dc14c0ef8ce --- /dev/null +++ b/examples/rerun-viewer/out/01902e90-8d59-7d3f-9203-875f47544be4/log_matplotlib.txt @@ -0,0 +1 @@ +received stop diff --git a/examples/rerun-viewer/out/01902e90-8d59-7d3f-9203-875f47544be4/log_object_detection.txt b/examples/rerun-viewer/out/01902e90-8d59-7d3f-9203-875f47544be4/log_object_detection.txt new file mode 100644 index 0000000000000000000000000000000000000000..c1e821fd1ec09eb0a2dfcc6e2de1117f50817df8 --- /dev/null +++ 
b/examples/rerun-viewer/out/01902e90-8d59-7d3f-9203-875f47544be4/log_object_detection.txt @@ -0,0 +1,12288 @@ +[object detection] received image input + +0: 384x640 (no detections), 62.9ms +Speed: 3.4ms preprocess, 62.9ms inference, 16.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.9ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 3.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.7ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.5ms +Speed: 2.8ms preprocess, 4.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.0ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.0ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 3.0ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 
(no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.0ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms 
inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.9ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 
384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.9ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.9ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.9ms +Speed: 2.7ms preprocess, 3.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + 
+0: 384x640 (no detections), 2.5ms +Speed: 3.0ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.9ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.1ms +Speed: 2.9ms preprocess, 3.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 2.6ms preprocess, 2.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.0ms 
preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.1ms +Speed: 3.0ms preprocess, 3.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 5.3ms +Speed: 3.0ms preprocess, 5.3ms inference, 0.5ms postprocess per 
image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 3.2ms preprocess, 2.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.9ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.0ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.3ms +Speed: 3.6ms preprocess, 3.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.0ms +Speed: 3.0ms preprocess, 3.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.3ms +Speed: 3.1ms preprocess, 4.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.1ms +Speed: 2.8ms preprocess, 3.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 
1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.7ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 4.0ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.7ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.0ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.7ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.7ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.3ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.9ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.8ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.9ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.5ms +Speed: 3.0ms preprocess, 4.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 3.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.1ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 
(no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.7ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms 
inference, 0.3ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.3ms
+Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.0ms
+Speed: 2.3ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.6ms
+Speed: 3.1ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640)
[... the same three-line block ("[object detection] received image input", the "0: 384x640 (no detections)" result line, and the "Speed:" summary) repeats for every subsequent frame; all frames report no detections, with preprocess and inference times of roughly 2-5 ms and postprocess times of 0.2-0.6 ms ...]
+[object detection] received image input
+
+0: 384x640 (no detections), 2.4ms
+Speed: 2.2ms preprocess, 2.4ms
inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.2ms +Speed: 3.2ms preprocess, 4.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.2ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.0ms +Speed: 3.2ms preprocess, 3.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 
384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.5ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.1ms +Speed: 2.1ms preprocess, 3.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.4ms +Speed: 4.2ms preprocess, 4.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.5ms +Speed: 2.8ms preprocess, 3.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + 
+0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.8ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.9ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms 
preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.0ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 3.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.3ms postprocess per 
image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.1ms +Speed: 3.1ms preprocess, 3.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.0ms +Speed: 2.9ms preprocess, 3.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.0ms +Speed: 2.1ms preprocess, 4.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 
2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.7ms +Speed: 2.3ms preprocess, 3.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 3.3ms preprocess, 2.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.3ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 5.4ms +Speed: 2.1ms preprocess, 5.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 
(no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.6ms +Speed: 3.2ms preprocess, 3.6ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 3.4ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 3.3ms preprocess, 2.8ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.0ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.8ms +Speed: 3.0ms preprocess, 4.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.8ms +Speed: 3.4ms preprocess, 3.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms 
inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.8ms preprocess, 2.7ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 
384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.3ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 3.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.2ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 5.0ms +Speed: 2.6ms preprocess, 5.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.3ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + 
+0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 3.3ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.0ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 3.6ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.4ms +Speed: 3.3ms preprocess, 3.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.9ms +Speed: 3.4ms preprocess, 4.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.3ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.9ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms 
preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.2ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 3.3ms preprocess, 2.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.3ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.2ms postprocess per 
image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.7ms
+Speed: 2.2ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.3ms
+Speed: 2.0ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 3.0ms
+Speed: 2.2ms preprocess, 3.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640)
[… the log repeats this "received image input" / per-image timing pair for every subsequent frame, always reporting (no detections) at shape (1, 3, 384, 640); only the millisecond timings vary …]
+[object detection]
received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.8ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.1ms +Speed: 2.6ms preprocess, 3.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 
2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.9ms +Speed: 2.3ms preprocess, 3.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.0ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.9ms +Speed: 3.2ms preprocess, 4.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.2ms +Speed: 2.4ms preprocess, 4.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 
(no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 3.4ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms 
inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 
384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.9ms +Speed: 3.3ms preprocess, 4.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 3.2ms preprocess, 2.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + 
+0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.1ms +Speed: 2.9ms preprocess, 3.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.8ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.2ms +Speed: 3.1ms 
preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.9ms +Speed: 3.1ms preprocess, 3.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.8ms preprocess, 2.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.1ms +Speed: 3.1ms preprocess, 3.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.7ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 3.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 3.7ms preprocess, 2.8ms inference, 0.3ms postprocess per 
image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.1ms +Speed: 2.9ms preprocess, 3.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.3ms +Speed: 2.6ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.8ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.7ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.5ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.8ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.6ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 
2.6ms +Speed: 2.0ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.9ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 2.0ms preprocess, 2.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.2ms +Speed: 2.8ms preprocess, 3.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.3ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.8ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 5.1ms +Speed: 3.3ms preprocess, 5.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 
(no detections), 2.4ms
+Speed: 2.1ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.4ms
+Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
[... several hundred further "[object detection] received image input" / "0: 384x640 (no detections)" log entries with similar 2-5 ms preprocess/inference/postprocess timings elided ...]
+
+0: 384x640
(no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 5.0ms +Speed: 2.8ms preprocess, 5.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms 
inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 3.3ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 2.0ms preprocess, 2.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.2ms postprocess per image at shape (1, 3, 
384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.4ms +Speed: 2.4ms preprocess, 3.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.7ms preprocess, 2.6ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 3.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.8ms +Speed: 2.2ms preprocess, 4.8ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + 
+0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.4ms +Speed: 2.3ms preprocess, 3.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.4ms +Speed: 2.4ms preprocess, 4.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 5.0ms +Speed: 2.9ms preprocess, 5.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.9ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms 
preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 3.8ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.0ms +Speed: 3.5ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.3ms +Speed: 2.6ms preprocess, 3.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.7ms +Speed: 2.3ms preprocess, 4.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.4ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.7ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 3.6ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.9ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per 
image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 3.1ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 3.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 3.9ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.7ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.7ms +Speed: 3.2ms preprocess, 3.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 
2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.0ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.0ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 4.0ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.0ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 3.0ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.0ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.5ms +Speed: 3.5ms preprocess, 3.5ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 5.2ms +Speed: 3.1ms preprocess, 5.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.4ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 3.2ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.9ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 2.0ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.4ms +Speed: 3.3ms preprocess, 3.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 3.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.1ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.0ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 
(no detections), 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 5.1ms +Speed: 2.8ms preprocess, 5.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.5ms +Speed: 2.9ms preprocess, 3.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.7ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.0ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms 
inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 3.1ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 3.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.8ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.8ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 
384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 3.1ms preprocess, 2.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 3.0ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + 
+0: 384x640 (no detections), 2.1ms
+Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.6ms
+Speed: 3.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.1ms
+Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 1.9ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 1.9ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 1.9ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 1.9ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per 
image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 1.9ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 1.9ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 1.9ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 1.9ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.0ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 1.9ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 6.1ms +Speed: 3.5ms preprocess, 6.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.6ms +Speed: 3.3ms preprocess, 4.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 
2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.4ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.0ms +Speed: 2.5ms preprocess, 3.0ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.3ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 1.9ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 1.9ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.0ms +Speed: 2.4ms preprocess, 3.0ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 1.9ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 
(no detections), 1.8ms +Speed: 2.0ms preprocess, 1.8ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 1.9ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 1.9ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 1.9ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.8ms +Speed: 1.9ms preprocess, 1.8ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.8ms +Speed: 1.9ms preprocess, 1.8ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.8ms +Speed: 1.9ms preprocess, 1.8ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 1.9ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 1.9ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms 
inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.8ms +Speed: 1.9ms preprocess, 1.8ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) diff --git a/examples/rerun-viewer/out/01902e90-8d59-7d3f-9203-875f47544be4/log_rerun.txt b/examples/rerun-viewer/out/01902e90-8d59-7d3f-9203-875f47544be4/log_rerun.txt new file mode 100644 index 0000000000000000000000000000000000000000..dde10b26e1337cab3799b4ecbdee067d0e57b0f4 --- /dev/null +++ b/examples/rerun-viewer/out/01902e90-8d59-7d3f-9203-875f47544be4/log_rerun.txt @@ -0,0 +1,28 @@ +[2024-06-19T03:35:44Z INFO re_sdk_comms::server] Hosting a SDK server over TCP at 0.0.0.0:9876. Connect with the Rerun logging SDK. 
+[2024-06-19T03:35:44Z INFO winit::platform_impl::platform::x11::window] Guessed window scale factor: 2 +[2024-06-19T03:35:45Z INFO egui_wgpu] There were 3 available wgpu adapters: {backend: Vulkan, device_type: IntegratedGpu, name: "Intel(R) Graphics (RPL-S)", driver: "Intel open-source Mesa driver", driver_info: "Mesa 23.2.1-1ubuntu3.1~22.04.2", vendor: 0x8086, device: 0xA780}, {backend: Vulkan, device_type: DiscreteGpu, name: "NVIDIA GeForce RTX 4060 Ti", driver: "NVIDIA", driver_info: "535.171.04", vendor: 0x10DE, device: 0x2805}, {backend: Vulkan, device_type: Cpu, name: "llvmpipe (LLVM 15.0.7, 256 bits)", driver: "llvmpipe", driver_info: "Mesa 23.2.1-1ubuntu3.1~22.04.2 (LLVM 15.0.7)", vendor: 0x10005} +[2024-06-19T03:35:45Z INFO tracing::span] perform; +[2024-06-19T03:35:45Z INFO zbus::handshake] write_command; command=Auth(Some(External), Some([49, 48, 48, 48])) +[2024-06-19T03:35:45Z INFO tracing::span] read_command; +[2024-06-19T03:35:45Z INFO zbus::handshake] write_command; command=NegotiateUnixFD +[2024-06-19T03:35:45Z INFO tracing::span] read_command; +[2024-06-19T03:35:45Z INFO zbus::handshake] write_command; command=Begin +[2024-06-19T03:35:45Z INFO tracing::span] socket reader; +[2024-06-19T03:35:45Z INFO tracing::span] perform; +[2024-06-19T03:35:45Z INFO zbus::handshake] write_command; command=Auth(Some(External), Some([49, 48, 48, 48])) +[2024-06-19T03:35:45Z INFO tracing::span] read_command; +[2024-06-19T03:35:45Z INFO zbus::handshake] write_command; command=NegotiateUnixFD +[2024-06-19T03:35:45Z INFO tracing::span] read_command; +[2024-06-19T03:35:45Z INFO zbus::handshake] write_command; command=Begin +[2024-06-19T03:35:45Z INFO tracing::span] socket reader; +[2024-06-19T03:35:45Z INFO zbus::connection] start_object_server; started_event=None +[2024-06-19T03:35:45Z INFO zbus::connection] {}; obj_server_task_name="ObjectServer task" +[2024-06-19T03:35:45Z INFO zbus::object_server] dispatch_message; msg=Msg { type: MethodCall, sender: UniqueName(Str(Borrowed(":1.3"))), path: ObjectPath("/org/a11y/atspi/accessible/root"), iface: InterfaceName(Str(Borrowed("org.freedesktop.DBus.Properties"))), member: MemberName(Str(Borrowed("Set"))), body: Signature("ssv") } +[2024-06-19T03:35:45Z INFO zbus::object_server] dispatch_method_call; msg=Msg { type: MethodCall, sender: UniqueName(Str(Borrowed(":1.3"))), path: ObjectPath("/org/a11y/atspi/accessible/root"), iface: InterfaceName(Str(Borrowed("org.freedesktop.DBus.Properties"))), member: MemberName(Str(Borrowed("Set"))), body: Signature("ssv") } +[2024-06-19T03:35:45Z INFO zbus::object_server] dispatch_method_call_try; msg=Msg { type: MethodCall, sender: UniqueName(Str(Borrowed(":1.3"))), path: ObjectPath("/org/a11y/atspi/accessible/root"), iface: InterfaceName(Str(Borrowed("org.freedesktop.DBus.Properties"))), member: MemberName(Str(Borrowed("Set"))), body: Signature("ssv") } +[2024-06-19T03:35:45Z INFO re_sdk_comms::server] New SDK client connected: 127.0.0.1:51690 +[2024-06-19T03:35:45Z INFO re_sdk_comms::server] New SDK client connected: 127.0.0.1:51702 +[2024-06-19T03:36:14Z WARN re_sdk_comms::server] Closing connection to client at 127.0.0.1:51702: The receiving end of the channel was closed + 2024-06-19T03:36:15.168457Z  WARN re_sdk_comms::buffered_client: Failed to send message after 3 attempts: Failed to send to Rerun server at 127.0.0.1:9876: Broken pipe (os error 32) + at /home/peiji/.cargo/registry/src/index.crates.io-6f17d22bba15001f/re_sdk_comms-0.15.1/src/buffered_client.rs:299 + diff --git 
a/examples/rerun-viewer/out/01902e90-8d59-7d3f-9203-875f47544be4/log_webcam.txt b/examples/rerun-viewer/out/01902e90-8d59-7d3f-9203-875f47544be4/log_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..4bf0f17510e8ec6a7af0107ab9434482d69f825d --- /dev/null +++ b/examples/rerun-viewer/out/01902e90-8d59-7d3f-9203-875f47544be4/log_webcam.txt @@ -0,0 +1,3184 @@ +[ WARN:0@1.130] global cap_v4l.cpp:999 open VIDEOIO(V4L2:/dev/video0): can't open camera by index +[ERROR:0@1.130] global obsensor_uvc_stream_channel.cpp:158 getStreamChannelGroup Camera index out of range +frame size is not correct +frame size is not correct +frame size is not correct +[... identical "frame size is not correct" lines repeated for the remainder of the 3184-line log, elided ...] +frame size is not correct +frame size is not correct
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
diff --git a/examples/rerun-viewer/out/01902f41-3f48-78b8-9263-2461585c9f1d/log_matplotlib.txt b/examples/rerun-viewer/out/01902f41-3f48-78b8-9263-2461585c9f1d/log_matplotlib.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b18938a318f1e7744bdbd3e412677dc14c0ef8ce
--- /dev/null
+++ b/examples/rerun-viewer/out/01902f41-3f48-78b8-9263-2461585c9f1d/log_matplotlib.txt
@@ -0,0 +1 @@
+received stop
diff --git a/examples/rerun-viewer/out/01902f41-3f48-78b8-9263-2461585c9f1d/log_object_detection.txt b/examples/rerun-viewer/out/01902f41-3f48-78b8-9263-2461585c9f1d/log_object_detection.txt
new file mode 100644
index 0000000000000000000000000000000000000000..97646a0d36e3b99e56e9037450fe1c7f75d90cc5
--- /dev/null
+++ b/examples/rerun-viewer/out/01902f41-3f48-78b8-9263-2461585c9f1d/log_object_detection.txt
@@ -0,0 +1,34408 @@
+[object detection] received image input
+
+0: 384x640 1 person, 56.6ms
+Speed: 4.2ms preprocess, 56.6ms inference, 190.0ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.3ms
+Speed: 2.7ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
[... remaining repeated "[object detection] received image input" / per-image timing log lines omitted ...]
inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.1ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 2.8ms preprocess, 4.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.4ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.0ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 2.4ms preprocess, 4.9ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 3.3ms preprocess, 3.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 3.0ms preprocess, 3.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.6ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.0ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.2ms preprocess, 3.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 3.2ms preprocess, 3.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.2ms preprocess, 2.9ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.4ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.5ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.2ms +Speed: 2.9ms preprocess, 4.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.5ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 2.3ms preprocess, 4.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.2ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms 
+Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.9ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.2ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.6ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.8ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.3ms preprocess, 
2.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 2.9ms preprocess, 3.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 3.3ms preprocess, 3.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.9ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.0ms preprocess, 2.5ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.7ms +Speed: 2.6ms preprocess, 4.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.0ms +Speed: 2.5ms preprocess, 4.0ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.2ms preprocess, 3.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.7ms +Speed: 3.1ms preprocess, 4.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.2ms preprocess, 3.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 2.8ms preprocess, 5.0ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.2ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.0ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.8ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.5ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.8ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 2.6ms preprocess, 4.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 2.3ms preprocess, 4.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 2.3ms preprocess, 4.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 2.2ms preprocess, 3.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.4ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.1ms preprocess, 3.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.0ms +Speed: 3.2ms preprocess, 4.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 2.4ms preprocess, 4.9ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 3.8ms preprocess, 5.0ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.6ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.9ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms 
+Speed: 2.7ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.4ms
+Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 3.0ms
+Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+
+[... this block repeats once per received frame; most frames detect 1 person, with roughly 2-4 ms preprocess, 2-6 ms inference, and 0.5-2 ms postprocess at shape (1, 3, 384, 640) ...]
+
+[object detection] received image input
+
+0: 384x640 1 person, 1 cell phone, 2.1ms
+Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 tie, 2.7ms
+Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+
+[... a handful of frames additionally detect 1 cell phone or 1 tie; the remaining frames again detect 1 person only ...]
+
+[object detection] received image input
+
+0: 384x640 1 person, 2.1ms
+Speed: 2.5ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection]
received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.7ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.9ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 3.5ms preprocess, 3.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.4ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 3.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.2ms +Speed: 3.7ms preprocess, 5.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.6ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms 
+Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 4.4ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.2ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.4ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.3ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 2.3ms preprocess, 4.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.3ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 
2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.1ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.4ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.5ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.8ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 2.6ms preprocess, 4.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 3.0ms preprocess, 4.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 7.3ms +Speed: 3.6ms preprocess, 7.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.4ms preprocess, 3.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.3ms preprocess, 3.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.4ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.3ms preprocess, 3.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.3ms +Speed: 2.5ms preprocess, 5.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.7ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.8ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.4ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.2ms +Speed: 2.3ms preprocess, 5.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms 
+Speed: 2.1ms preprocess, 2.7ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.3ms preprocess, 3.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.9ms preprocess, 3.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 3.5ms preprocess, 5.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.9ms preprocess, 3.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.7ms +Speed: 3.4ms preprocess, 4.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 3.3ms preprocess, 4.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.2ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 3.5ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 
2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 3.0ms preprocess, 4.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 3.2ms preprocess, 4.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.7ms +Speed: 3.1ms preprocess, 4.7ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.1ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.3ms preprocess, 3.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.1ms preprocess, 3.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.8ms preprocess, 3.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.0ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.8ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.5ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.6ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.4ms preprocess, 3.6ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.7ms +Speed: 3.7ms preprocess, 5.7ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.6ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 5.1ms preprocess, 3.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.5ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.2ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.0ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.1ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input
+
+0: 384x640 1 person, 2.9ms
+Speed: 2.7ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.5ms
+Speed: 2.3ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 3.8ms
+Speed: 3.3ms preprocess, 3.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[... the same three-line block repeats for every subsequent camera frame: "1 person" detected at shape (1, 3, 384, 640), with preprocess and inference times of roughly 2-5 ms and postprocess times of roughly 0.5-1.7 ms per image ...]
+[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.1ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.1ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.2ms preprocess, 3.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.9ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.1ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.9ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.1ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 3.7ms +Speed: 2.2ms preprocess, 3.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.2ms preprocess, 3.7ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 2.2ms preprocess, 4.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.2ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.9ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 3.4ms preprocess, 4.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.1ms preprocess, 3.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms 
+Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 3.3ms preprocess, 5.0ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.2ms +Speed: 3.2ms preprocess, 4.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 2.7ms preprocess, 
4.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.9ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.6ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.9ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.7ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.5ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 3.0ms preprocess, 4.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.2ms preprocess, 3.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.1ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 3.2ms preprocess, 4.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 3.1ms preprocess, 4.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 3.1ms preprocess, 4.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.1ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.2ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.3ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.0ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.4ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.9ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.8ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.1ms preprocess, 3.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms 
+Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.3ms +Speed: 3.6ms preprocess, 5.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.3ms preprocess, 3.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.9ms preprocess, 
3.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.3ms preprocess, 3.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.3ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.7ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.1ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.7ms +Speed: 2.2ms preprocess, 4.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.6ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.2ms
+Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.1ms
+Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.1ms
+Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.3ms
+Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 3.2ms
+Speed: 2.2ms preprocess, 3.2ms inference, 0.8ms postprocess per image at
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.2ms +Speed: 3.2ms preprocess, 4.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.3ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.2ms preprocess, 3.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.8ms preprocess, 2.9ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.2ms preprocess, 3.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.2ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.3ms preprocess, 3.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 3.9ms preprocess, 3.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.4ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.9ms +Speed: 3.0ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.3ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.2ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms 
+Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.1ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.9ms preprocess, 3.6ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 
2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.2ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.8ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.9ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.1ms preprocess, 3.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.4ms preprocess, 3.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.5ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.5ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.0ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.9ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.0ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.4ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.9ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.1ms preprocess, 3.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 4.4ms +Speed: 2.3ms preprocess, 4.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.4ms +Speed: 2.4ms preprocess, 4.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.0ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 3.3ms +Speed: 3.5ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.3ms preprocess, 3.7ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 2.3ms preprocess, 4.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.9ms +Speed: 2.7ms preprocess, 3.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.2ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms 
+Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.2ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.1ms preprocess, 3.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.4ms preprocess, 3.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 
2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.0ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.5ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.0ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.8ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.2ms
+Speed: 2.2ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 3.1ms
+Speed: 2.1ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.1ms
+Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.3ms
+Speed: 2.3ms preprocess,
2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.2ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.8ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.7ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.3ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.7ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.1ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.0ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.0ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 3.4ms preprocess, 4.9ms inference, 0.9ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 3.2ms preprocess, 4.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.6ms +Speed: 3.3ms preprocess, 4.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.1ms preprocess, 3.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.3ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.3ms preprocess, 3.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.0ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.7ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.1ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.4ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 2.1ms preprocess, 4.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.2ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.2ms preprocess, 3.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.4ms preprocess, 3.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.1ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.3ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.2ms +Speed: 2.9ms preprocess, 4.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.9ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 1.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms 
+Speed: 2.1ms preprocess, 2.3ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.1ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.1ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 
2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 2.4ms preprocess, 4.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.6ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.9ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 1.2ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 3.1ms preprocess, 4.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.1ms preprocess, 3.4ms inference, 0.9ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.2ms +Speed: 3.0ms preprocess, 5.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.8ms preprocess, 3.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.6ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.3ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.1ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.9ms +Speed: 2.9ms preprocess, 3.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.8ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.4ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.1ms preprocess, 3.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.1ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.9ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.1ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.8ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.7ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.1ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.5ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms 
+Speed: 2.2ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.0ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 
2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.2ms +Speed: 2.2ms preprocess, 4.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 3.7ms preprocess, 5.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.2ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.8ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.1ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.5ms 
2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.5ms preprocess, 3.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.5ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.7ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.1ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.6ms +Speed: 2.8ms preprocess, 4.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.4ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.7ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.4ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.9ms +Speed: 2.3ms preprocess, 3.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.2ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 2.4ms preprocess, 5.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 3.5ms preprocess, 3.6ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.0ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.5ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.7ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.5ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.6ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.8ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.7ms preprocess, 3.6ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.5ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.4ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.3ms preprocess, 3.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.8ms preprocess, 3.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 3.5ms preprocess, 3.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.3ms preprocess, 3.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 2.1ms preprocess, 3.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.7ms preprocess, 3.0ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.9ms preprocess, 3.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.0ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms 
+Speed: 2.4ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.9ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.9ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 
2.3ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.0ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.8ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 3.8ms preprocess, 5.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.7ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 3.0ms preprocess, 3.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 2.8ms preprocess, 4.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.0ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.7ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 3.2ms preprocess, 4.9ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.4ms +Speed: 3.2ms preprocess, 4.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.9ms +Speed: 2.1ms preprocess, 3.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.1ms preprocess, 3.2ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.5ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.7ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.1ms preprocess, 3.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.3ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.4ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.4ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.9ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.1ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.1ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.6ms preprocess, 3.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 3.0ms +Speed: 2.6ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 3.0ms preprocess, 4.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.7ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.8ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.7ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 6.0ms +Speed: 3.6ms preprocess, 6.0ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.3ms preprocess, 3.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.7ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.6ms +Speed: 2.2ms preprocess, 5.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms 
+Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 3.4ms preprocess, 3.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 2.3ms preprocess, 4.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.7ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 3.3ms preprocess, 3.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 
2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.4ms preprocess, 3.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.2ms preprocess, 3.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 3.2ms preprocess, 3.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.9ms preprocess, 3.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.4ms +Speed: 3.7ms preprocess, 4.4ms inference, 0.6ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.1ms preprocess, 3.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.7ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.7ms +Speed: 2.8ms preprocess, 4.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.1ms preprocess, 3.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.8ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 5.3ms +Speed: 3.2ms preprocess, 5.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.1ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.3ms preprocess, 3.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.3ms preprocess, 3.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.0ms preprocess, 3.0ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.1ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.0ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.4ms preprocess, 3.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 2.8ms preprocess, 4.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.5ms preprocess, 3.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.4ms preprocess, 3.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.8ms +Speed: 2.4ms preprocess, 5.8ms inference, 2.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.3ms preprocess, 3.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.7ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.1ms preprocess, 3.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.2ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.8ms preprocess, 2.9ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.8ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms 
+Speed: 2.8ms preprocess, 4.9ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.6ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.7ms preprocess, 3.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.6ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 3.6ms preprocess, 5.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.8ms preprocess, 2.8ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.5ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.4ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.6ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.0ms +Speed: 2.1ms preprocess, 4.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.9ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.4ms preprocess, 
1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.8ms preprocess, 3.6ms inference, 2.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 6.0ms +Speed: 3.3ms preprocess, 6.0ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.7ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.4ms preprocess, 3.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 4.3ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.7ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.1ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.1ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.3ms preprocess, 3.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.6ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 2.3ms preprocess, 4.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.7ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.5ms +Speed: 3.0ms preprocess, 4.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.4ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.6ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.5ms +Speed: 3.6ms preprocess, 4.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.5ms preprocess, 3.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.7ms preprocess, 3.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.2ms +Speed: 2.7ms preprocess, 4.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.2ms preprocess, 3.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.8ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.0ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 3.0ms preprocess, 5.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.0ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.5ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.9ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.3ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.8ms preprocess, 3.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 2.6ms preprocess, 3.8ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms 
+Speed: 2.4ms preprocess, 4.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 3.2ms preprocess, 5.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.7ms +Speed: 3.3ms preprocess, 4.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 
3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.8ms preprocess, 3.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.8ms +Speed: 2.1ms preprocess, 5.8ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.2ms preprocess, 3.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.4ms preprocess, 3.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.2ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.1ms preprocess, 2.7ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.9ms preprocess, 2.7ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.7ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.6ms +Speed: 2.6ms preprocess, 4.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at 
shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.1ms
+Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.2ms
+Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 4.3ms
+Speed: 2.6ms preprocess, 4.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 4.1ms
+Speed: 3.0ms preprocess, 4.1ms inference, 0.5ms postprocess per image at
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.9ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.3ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.4ms preprocess, 3.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 3.6ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.9ms preprocess, 2.7ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 2.3ms preprocess, 4.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.1ms preprocess, 3.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.7ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms 
+Speed: 2.2ms preprocess, 3.0ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.1ms preprocess, 3.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.2ms preprocess, 3.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.0ms preprocess, 3.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 3.2ms preprocess, 3.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.1ms preprocess, 3.3ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 
2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.1ms preprocess, 3.6ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.2ms preprocess, 3.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.6ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 3.1ms preprocess, 4.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.5ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.2ms preprocess, 3.0ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 3.6ms preprocess, 3.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.4ms preprocess, 3.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.8ms preprocess, 2.9ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.9ms +Speed: 2.5ms preprocess, 3.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.5ms +Speed: 2.6ms preprocess, 4.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.3ms preprocess, 3.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.6ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.1ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.9ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 3.0ms preprocess, 3.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 3.4ms preprocess, 3.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.5ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.9ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.9ms +Speed: 2.5ms preprocess, 3.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.1ms preprocess, 3.6ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 3.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.2ms preprocess, 3.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.2ms preprocess, 3.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms 
+Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.9ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.1ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.5ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.2ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 
2.2ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.1ms preprocess, 3.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.0ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 3.0ms preprocess, 3.8ms inference, 0.5ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.7ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.0ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.5ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.4ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.1ms preprocess, 3.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.9ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.9ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms 
+Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.8ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.2ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 
2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.0ms +Speed: 2.1ms preprocess, 4.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.0ms +Speed: 2.2ms preprocess, 4.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.7ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.4ms +Speed: 2.4ms preprocess, 4.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.7ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.2ms preprocess, 3.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.5ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.2ms preprocess, 2.9ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.4ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.8ms preprocess, 3.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 3.3ms preprocess, 3.8ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.0ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.2ms +Speed: 3.1ms preprocess, 5.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.3ms preprocess, 3.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.2ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.7ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 3.4ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.8ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.5ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 2.6ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 3.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.3ms preprocess, 3.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.5ms +Speed: 2.3ms preprocess, 4.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.8ms +Speed: 3.4ms preprocess, 5.8ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.1ms preprocess, 3.7ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.4ms +Speed: 3.3ms preprocess, 5.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms 
+Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 2.9ms preprocess, 4.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.2ms preprocess, 3.0ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 3.3ms preprocess, 3.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.2ms preprocess, 3.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.6ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.1ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.4ms +Speed: 2.2ms preprocess, 4.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.9ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 
2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.5ms preprocess, 3.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.4ms preprocess, 3.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.0ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.9ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.5ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.7ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.3ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.5ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.0ms +Speed: 3.1ms preprocess, 4.0ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.8ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 3.2ms preprocess, 4.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.4ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
+[object detection] received image input + +0: 384x640 1 person, 4.7ms +Speed: 2.3ms preprocess, 4.7ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.2ms preprocess, 3.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.0ms +Speed: 2.1ms preprocess, 4.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.0ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.9ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 3.0ms +Speed: 2.7ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape 
(1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 chairs, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 chairs, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 chairs, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 
384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 chairs, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.1ms +Speed: 3.2ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.1ms +Speed: 3.2ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 chairs, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.5ms +Speed: 3.1ms preprocess, 3.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.3ms +Speed: 2.2ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.7ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object 
detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.1ms +Speed: 2.6ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.3ms +Speed: 3.2ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 3.4ms +Speed: 2.8ms preprocess, 3.4ms inference, 0.6ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.3ms +Speed: 2.4ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 3.1ms +Speed: 3.0ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.4ms +Speed: 2.2ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.8ms +Speed: 2.7ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms 
+Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.2ms 
preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.3ms +Speed: 2.1ms preprocess, 3.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.1ms +Speed: 3.0ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.7ms preprocess, 
2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1.9ms +Speed: 2.2ms preprocess, 1.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.5ms +Speed: 2.2ms preprocess, 3.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.5ms +Speed: 3.5ms preprocess, 3.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.9ms +Speed: 2.4ms preprocess, 2.9ms 
inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.1ms +Speed: 2.8ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.1ms +Speed: 2.7ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.9ms +Speed: 3.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.4ms +Speed: 3.0ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 
0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.8ms +Speed: 3.3ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 4.0ms +Speed: 2.2ms preprocess, 4.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.7ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess 
per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.2ms +Speed: 3.0ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.0ms +Speed: 2.5ms preprocess, 3.0ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 3.1ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at 
+[object detection] received image input
+
+0: 384x640 1 person, 1 chair, 2.4ms
+Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.4ms
+Speed: 2.1ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.3ms
+Speed: 2.9ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.9ms
+Speed: 3.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 3.4ms
+Speed: 2.5ms preprocess, 3.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.5ms preprocess, 3.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.7ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.4ms preprocess, 3.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 3.9ms +Speed: 2.1ms preprocess, 3.9ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.3ms +Speed: 2.3ms preprocess, 5.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.1ms preprocess, 3.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms 
+Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.8ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.6ms preprocess, 3.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 3.3ms preprocess, 5.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.4ms +Speed: 3.0ms preprocess, 4.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 
2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.7ms +Speed: 2.2ms preprocess, 4.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.2ms preprocess, 3.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 1.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 1.1ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.0ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.3ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.5ms preprocess, 2.9ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 2.2ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.5ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.6ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.6ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 4.0ms preprocess, 4.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.8ms preprocess, 3.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.5ms preprocess, 3.3ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.4ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 2.6ms preprocess, 5.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 3.2ms preprocess, 3.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 2.9ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 3.4ms preprocess, 5.0ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms 
+Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.3ms preprocess, 3.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.0ms preprocess, 2.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 3.9ms preprocess, 3.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.9ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.7ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 
2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 3.2ms preprocess, 4.9ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.7ms preprocess, 3.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.0ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640)
+[... output repeats: one "received image input" / detection / Speed entry per frame, almost always "1 person" (occasionally also "1 tie" or "1 cell phone"), with roughly 2-5 ms inference per image at shape (1, 3, 384, 640) ...]
+[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess,
2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.6ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.5ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.4ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.7ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.3ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 3.1ms preprocess, 4.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.0ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.4ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.9ms +Speed: 3.2ms preprocess, 3.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.5ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 3.0ms preprocess, 5.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 3.3ms preprocess, 4.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 3.1ms preprocess, 4.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.2ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.4ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.6ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.3ms preprocess, 3.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.8ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms 
+Speed: 3.3ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.5ms +Speed: 3.2ms preprocess, 4.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.7ms +Speed: 3.0ms preprocess, 4.7ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.6ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.4ms preprocess, 3.0ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 
2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.4ms +Speed: 2.5ms preprocess, 4.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.9ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.3ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.3ms preprocess, 3.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.3ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.4ms preprocess, 3.7ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 3.3ms preprocess, 3.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 2.2ms preprocess, 5.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.8ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.7ms preprocess, 3.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.9ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.3ms preprocess, 3.6ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.8ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 3.3ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.7ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.6ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.4ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.9ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.1ms preprocess, 3.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.2ms +Speed: 3.3ms preprocess, 4.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 3.4ms preprocess, 5.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.4ms +Speed: 2.4ms preprocess, 4.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.8ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tie, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image 
input + +0: 384x640 1 person, 5.0ms +Speed: 3.2ms preprocess, 5.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.2ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.7ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.6ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.7ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.9ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.5ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.4ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.9ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 
person, 4.3ms
+Speed: 2.1ms preprocess, 4.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 3.6ms
+Speed: 3.2ms preprocess, 3.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 tie, 2.2ms
+Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+[... repeated detection output omitted: the same log pattern recurs for every incoming camera frame; in this run, per-frame inference times range from about 1.9 ms to 5.6 ms, and the detections are almost always a single person, occasionally with an extra class such as "tie" ...]
+
+[object detection] received image input
+
+0:
384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.1ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.5ms +Speed: 2.2ms preprocess, 4.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms 
+Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.3ms preprocess, 3.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.2ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.1ms preprocess, 2.8ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.7ms preprocess, 
2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.3ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 2.3ms preprocess, 4.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.2ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.8ms preprocess, 2.9ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.6ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.9ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 3.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.2ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.9ms preprocess, 3.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 3.9ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 3.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 2.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms 
+Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 
2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.4ms preprocess, 3.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.6ms preprocess, 2.9ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 3.2ms preprocess, 4.8ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.5ms preprocess, 3.3ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 2.1ms preprocess, 5.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 3.1ms preprocess, 4.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1.9ms +Speed: 3.1ms preprocess, 1.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.6ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 3.2ms preprocess, 4.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.5ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.3ms preprocess, 3.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input
+
+0: 384x640 1 person, 2.3ms
+Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.3ms
+Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.1ms
+Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
[remaining added lines repeat the same "[object detection] received image input" / YOLO timing output: 1 person detected per frame at shape (1, 3, 384, 640), per-stage timings roughly 2 ms to 5 ms]
diff --git a/examples/rerun-viewer/out/01902f41-3f48-78b8-9263-2461585c9f1d/log_rerun.txt b/examples/rerun-viewer/out/01902f41-3f48-78b8-9263-2461585c9f1d/log_rerun.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ffd776dcd2f8ead118a43b67c793b4951268fccf
--- /dev/null
+++ b/examples/rerun-viewer/out/01902f41-3f48-78b8-9263-2461585c9f1d/log_rerun.txt
@@ -0,0 +1,27 @@
+[2024-06-19T06:48:44Z INFO re_sdk_comms::server] Hosting a SDK server over TCP at 0.0.0.0:9876. Connect with the Rerun logging SDK.
+[2024-06-19T06:48:44Z INFO winit::platform_impl::platform::x11::window] Guessed window scale factor: 2
+[2024-06-19T06:48:44Z INFO egui_wgpu] There were 3 available wgpu adapters: {backend: Vulkan, device_type: IntegratedGpu, name: "Intel(R) Graphics (RPL-S)", driver: "Intel open-source Mesa driver", driver_info: "Mesa 23.2.1-1ubuntu3.1~22.04.2", vendor: 0x8086, device: 0xA780}, {backend: Vulkan, device_type: DiscreteGpu, name: "NVIDIA GeForce RTX 4060 Ti", driver: "NVIDIA", driver_info: "535.171.04", vendor: 0x10DE, device: 0x2805}, {backend: Vulkan, device_type: Cpu, name: "llvmpipe (LLVM 15.0.7, 256 bits)", driver: "llvmpipe", driver_info: "Mesa 23.2.1-1ubuntu3.1~22.04.2 (LLVM 15.0.7)", vendor: 0x10005}
+[2024-06-19T06:48:44Z INFO re_sdk_comms::server] New SDK client connected: 127.0.0.1:46778
+[2024-06-19T06:48:44Z INFO re_sdk_comms::server] New SDK client connected: 127.0.0.1:46782
+[2024-06-19T06:48:44Z INFO tracing::span] perform;
+[2024-06-19T06:48:44Z INFO zbus::handshake] write_command; command=Auth(Some(External), Some([49, 48, 48, 48]))
+[2024-06-19T06:48:44Z INFO tracing::span] read_command;
+[2024-06-19T06:48:44Z INFO zbus::handshake] write_command; command=NegotiateUnixFD
+[2024-06-19T06:48:44Z INFO tracing::span] read_command;
+[2024-06-19T06:48:44Z INFO zbus::handshake] write_command; command=Begin
+[2024-06-19T06:48:44Z INFO tracing::span] socket reader;
+[2024-06-19T06:48:44Z INFO tracing::span] perform;
+[2024-06-19T06:48:44Z INFO zbus::handshake] write_command; command=Auth(Some(External), Some([49, 48, 48, 48]))
+[2024-06-19T06:48:44Z INFO tracing::span] read_command;
+[2024-06-19T06:48:44Z INFO zbus::handshake] write_command; command=NegotiateUnixFD
+[2024-06-19T06:48:44Z INFO tracing::span] read_command;
+[2024-06-19T06:48:44Z INFO zbus::handshake] write_command; command=Begin
+[2024-06-19T06:48:44Z INFO tracing::span] socket reader;
+[2024-06-19T06:48:44Z INFO zbus::connection] start_object_server; started_event=None
+[2024-06-19T06:48:44Z INFO zbus::connection] {}; obj_server_task_name="ObjectServer task"
+[2024-06-19T06:48:44Z INFO zbus::object_server] dispatch_message; msg=Msg { type: MethodCall, sender: UniqueName(Str(Borrowed(":1.3"))), path: ObjectPath("/org/a11y/atspi/accessible/root"), iface: InterfaceName(Str(Borrowed("org.freedesktop.DBus.Properties"))), member: MemberName(Str(Borrowed("Set"))), body: Signature("ssv") }
+[2024-06-19T06:48:44Z INFO zbus::object_server] dispatch_method_call; msg=Msg { type: MethodCall, sender: UniqueName(Str(Borrowed(":1.3"))), path: ObjectPath("/org/a11y/atspi/accessible/root"), iface: InterfaceName(Str(Borrowed("org.freedesktop.DBus.Properties"))), member: MemberName(Str(Borrowed("Set"))), body: Signature("ssv") }
+[2024-06-19T06:48:44Z INFO zbus::object_server] dispatch_method_call_try; msg=Msg { type: MethodCall, sender: UniqueName(Str(Borrowed(":1.3"))), path: ObjectPath("/org/a11y/atspi/accessible/root"), iface: InterfaceName(Str(Borrowed("org.freedesktop.DBus.Properties"))), member: MemberName(Str(Borrowed("Set"))), body: Signature("ssv") }
+[2024-06-19T07:01:08Z INFO re_sdk_comms::server] New SDK client connected: 127.0.0.1:48450
+[2024-06-19T07:01:08Z INFO re_sdk_comms::server] New SDK client connected: 127.0.0.1:48458
+[2024-06-19T07:04:32Z ERROR re_query::archetype_view] found corrupt cell -- mismatched number of instances
diff --git a/examples/rerun-viewer/out/01902f41-3f48-78b8-9263-2461585c9f1d/log_webcam.txt b/examples/rerun-viewer/out/01902f41-3f48-78b8-9263-2461585c9f1d/log_webcam.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c3b635a366c4a7efd115c47ed9fcdcc6cc9fbd6c
--- /dev/null
+++ b/examples/rerun-viewer/out/01902f41-3f48-78b8-9263-2461585c9f1d/log_webcam.txt
@@ -0,0 +1,8505 @@
+frame size is not correct
+frame size is not correct
+frame size is not correct
+frame size is not correct
+frame size is not correct
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct 
+frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is 
not correct diff --git a/examples/rerun-viewer/out/01902f4c-9833-7ff3-9725-0c0d105aff9c/log_object_detection.txt b/examples/rerun-viewer/out/01902f4c-9833-7ff3-9725-0c0d105aff9c/log_object_detection.txt new file mode 100644 index 0000000000000000000000000000000000000000..e0a2ab829b6446a6a96ad7f21ea43d1a60ef581a --- /dev/null +++ b/examples/rerun-viewer/out/01902f4c-9833-7ff3-9725-0c0d105aff9c/log_object_detection.txt @@ -0,0 +1,33776 @@ +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 55.5ms +Speed: 2.9ms preprocess, 55.5ms inference, 214.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 3.9ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 1.8ms +Speed: 2.0ms preprocess, 1.8ms inference, 0.4ms postprocess
per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 1.7ms +Speed: 2.4ms preprocess, 1.7ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 1.8ms +Speed: 1.9ms preprocess, 1.8ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 1.9ms +Speed: 1.9ms preprocess, 1.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.0ms +Speed: 2.5ms preprocess, 2.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 1.7ms +Speed: 1.9ms preprocess, 1.7ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 1.8ms +Speed: 2.0ms preprocess, 1.8ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 3.0ms preprocess, 2.2ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.0ms +Speed: 1.9ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 1.8ms +Speed: 1.9ms preprocess, 1.8ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms 
preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.5ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.7ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 3.3ms +Speed: 2.7ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 2.4ms preprocess, 5.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.1ms +Speed: 3.0ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.8ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 3.3ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.6ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 1.4ms postprocess per image at shape (1, 3, 
384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.7ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.8ms +Speed: 3.2ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 3.1ms +Speed: 3.0ms preprocess, 3.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.8ms +Speed: 2.7ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 2.2ms preprocess, 5.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 
0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.2ms preprocess, 3.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 3.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.5ms +Speed: 2.7ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 3.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 3.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 
person, 1 baseball bat, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.9ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 3.4ms +Speed: 2.2ms preprocess, 3.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 4.9ms +Speed: 3.2ms preprocess, 4.9ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.4ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per 
image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 3.5ms +Speed: 2.1ms preprocess, 3.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 3.8ms +Speed: 3.3ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 4.0ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 3.9ms +Speed: 2.7ms preprocess, 3.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 4.1ms 
+Speed: 3.3ms preprocess, 4.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.0ms +Speed: 2.5ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.2ms +Speed: 2.6ms preprocess, 4.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 3.3ms preprocess, 5.0ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 chairs, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 chairs, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object 
detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received 
image input + +0: 384x640 3 persons, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 persons, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bottle, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object 
detection] received image input + +0: 384x640 1 bed, 1 laptop, 1 book, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bed, 1 laptop, 1 book, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 1 laptop, 1 keyboard, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bed, 1 laptop, 1 keyboard, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tie, 1 laptop, 1 keyboard, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 keyboards, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bed, 2 keyboards, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2 keyboards, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 1 keyboard, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 1 keyboard, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bed, 1 keyboard, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 1 keyboard, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 1 laptop, 1 keyboard, 2.4ms +Speed: 2.2ms preprocess, 2.4ms 
inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 laptop, 1 keyboard, 2.4ms
+Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 bed, 1 keyboard, 2.3ms
+Speed: 2.2ms preprocess, 2.3ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 bed, 1 laptop, 1 book, 2.8ms
+Speed: 2.8ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 3 laptops, 2.5ms
+Speed: 2.2ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.2ms
+Speed: 2.4ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 tv, 1 laptop, 2.0ms
+Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 airplane, 2.6ms
+Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 tv, 1 laptop, 1 keyboard, 1 cell phone, 2.2ms
+Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 chair, 1 tv, 1 laptop, 1 keyboard, 2.7ms
+Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 tv, 1 remote, 2 keyboards, 2.4ms
+Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 laptop, 1 keyboard, 1 book, 2.2ms
+Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 train, 2.2ms
+Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 cup, 1 bowl, 2 tvs, 1 laptop, 2 keyboards, 2.0ms
+Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 3 cups, 1 chair, 2 tvs, 2 laptops, 1 mouse, 1 remote, 2 keyboards, 2.5ms
+Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2 chairs, 2 tvs, 1 laptop, 3 keyboards, 2.2ms
+Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 3 persons, 2.1ms
+Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.5ms
+Speed: 2.4ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 2 bottles, 1 cup, 1 tv, 1 keyboard, 2.1ms
+Speed: 2.4ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 car, 1 bottle, 1 laptop, 2 keyboards, 2.3ms
+Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 2 chairs, 1 tv, 1 laptop, 1 keyboard, 1 book, 2.2ms
+Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 bus, 1 train, 2.0ms
+Speed: 2.4ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 5 chairs, 2 laptops, 1 keyboard, 1 refrigerator, 2.2ms
+Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 2 persons, 1 laptop, 2 keyboards, 2.2ms
+Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 chair, 2.2ms
+Speed: 2.7ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 cell phone, 2.2ms
+Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.2ms
+Speed: 2.6ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image
input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 chairs, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.8ms preprocess, 3.7ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image 
input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 3.0ms preprocess, 4.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.5ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.1ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.8ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 
person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.8ms preprocess, 2.6ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 
3.1ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.0ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.6ms preprocess, 3.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 3.5ms preprocess, 5.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 5.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.6ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.7ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.7ms preprocess, 2.7ms 
inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bird, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bird, 2.3ms +Speed: 3.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 bird, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 bird, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 1 toothbrush, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 
person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 toothbrush, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 cell phones, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 chair, 1 cell phone, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 persons, 1 cell phone, 1 toothbrush, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 cell phone, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 persons, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bird, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 2.8ms preprocess, 3.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 baseball bat, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.8ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.0ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.7ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 3.4ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tie, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.0ms preprocess, 2.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.0ms preprocess, 3.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.0ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.5ms +Speed: 3.6ms preprocess, 5.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.6ms +Speed: 3.5ms preprocess, 4.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 2.4ms preprocess, 4.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.5ms preprocess, 3.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms 
+Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.9ms
+Speed: 2.2ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 cell phone, 2.3ms
+Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 2 persons, 1 mouse, 1 remote, 2 keyboards, 2.7ms
+Speed: 3.0ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.3ms
+Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640)
+
+[... several hundred further frames in the same format: each one logs "[object detection] received image input", the objects detected in that frame (e.g. persons, cell phone, chair, keyboard, bowl, umbrella, cup), and the YOLO preprocess/inference/postprocess timings at input shape (1, 3, 384, 640) ...]
+
+[object detection] received image input
+
+0: 384x640 1 cup, 2.7ms
+Speed: 2.7ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input + +0: 384x640 1 cup, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 vase, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 laptop, 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 tv, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 3.3ms +Speed: 2.7ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 3.0ms +Speed: 2.4ms preprocess, 3.0ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.9ms +Speed: 2.5ms preprocess, 2.9ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.8ms +Speed: 2.9ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 3.5ms +Speed: 2.5ms preprocess, 3.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 1 scissors, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 
0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 scissors, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.8ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 4.8ms +Speed: 2.2ms preprocess, 4.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 dining table, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.7ms +Speed: 4.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.7ms +Speed: 3.1ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 3.0ms +Speed: 2.7ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object 
detection] received image input + +0: 384x640 1 bowl, 1 laptop, 3.1ms +Speed: 2.8ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 dining table, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 dining table, 1 laptop, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 dining table, 1 laptop, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 laptop, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 1 scissors, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 dining table, 1 laptop, 1 scissors, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 1 scissors, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 dining table, 1 laptop, 1 scissors, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 dining table, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 dining table, 1 laptop, 2.6ms +Speed: 2.4ms preprocess, 2.6ms 
inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 1 scissors, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 dining table, 1 scissors, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 knife, 1 bowl, 1 dining table, 1 laptop, 1 scissors, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 3.8ms +Speed: 2.8ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 dining table, 1 laptop, 1 scissors, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 dining table, 1 laptop, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 1 scissors, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 1 scissors, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 1 scissors, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 1 scissors, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.8ms postprocess per 
image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 dining table, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.3ms +Speed: 4.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.7ms +Speed: 3.0ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.7ms +Speed: 2.8ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.3ms +Speed: 3.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.4ms +Speed: 3.0ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 1 scissors, 2.9ms +Speed: 2.7ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 tv, 1 scissors, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 knife, 1 bowl, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 knife, 1 bowl, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + 
+0: 384x640 1 knife, 1 bowl, 1 scissors, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 knife, 1 bowl, 1 scissors, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 knife, 1 bowl, 1 scissors, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 3.2ms +Speed: 3.3ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 1 scissors, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 scissors, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bowl, 1 scissors, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 scissors, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 scissors, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 laptop, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 laptop, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 cup, 1 laptop, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 laptop, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 knife, 1 laptop, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 knife, 1 laptop, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 knife, 1 laptop, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 laptop, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 3.0ms +Speed: 3.3ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 suitcase, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 toothbrush, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tie, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tie, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.2ms +Speed: 3.1ms preprocess, 3.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 knife, 1 bowl, 2.8ms +Speed: 2.9ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 airplane, 
2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 sink, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.6ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.4ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 cats, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bed, 1 laptop, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bed, 1 book, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bed, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 bed, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bed, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 
bed, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bed, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 bed, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 bottle, 1 bed, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 3.0ms +Speed: 3.2ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 bed, 1 laptop, 2.9ms +Speed: 3.2ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bed, 2.9ms +Speed: 2.6ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bed, 3.0ms +Speed: 2.6ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bottle, 1 bed, 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 2.6ms +Speed: 2.7ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.2ms postprocess per image at shape (1, 3, 
384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.5ms +Speed: 2.6ms preprocess, 3.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 airplane, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 airplane, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 airplane, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 airplane, 1 bed, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 airplane, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 airplane, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 airplane, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no 
detections), 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.8ms +Speed: 2.8ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 tv, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 tv, 1 laptop, 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 tv, 2.8ms +Speed: 2.8ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 tv, 1 laptop, 2.8ms +Speed: 3.1ms preprocess, 2.8ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 tv, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 tv, 1 laptop, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 bowl, 1 tv, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 tv, 1 laptop, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 suitcase, 1 bowl, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 suitcase, 1 bowl, 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 suitcase, 1 bowl, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 cups, 1 laptop, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 bowl, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bowl, 1 laptop, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms 
postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 bowl, 1 laptop, 2.1ms
+Speed: 2.0ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 cup, 1 bowl, 1 laptop, 2.1ms
+Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 tv, 1 laptop, 2.6ms
+Speed: 3.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[...]
+[object detection] received image input
+
+0: 384x640 1 person, 1 skateboard, 1 tv, 1 cell phone, 3.0ms
+Speed: 3.4ms preprocess, 3.0ms inference, 0.5ms postprocess per image at
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 cell phone, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 skateboard, 1 tv, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 skateboard, 1 tv, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 skateboard, 1 cell phone, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 skateboard, 1 cell phone, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 skateboard, 1 tv, 1 cell phone, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 skateboard, 1 cell phone, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 skateboard, 1 tv, 1 cell phone, 1 scissors, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 4.9ms +Speed: 2.1ms preprocess, 4.9ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 
skateboard, 1 tv, 1 cell phone, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 cell phone, 1 scissors, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 cell phone, 1 scissors, 5.1ms +Speed: 2.9ms preprocess, 5.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 laptop, 1 cell phone, 1 scissors, 3.3ms +Speed: 4.3ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 laptop, 1 cell phone, 1 scissors, 3.4ms +Speed: 3.8ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 cell phone, 1 scissors, 3.4ms +Speed: 3.5ms preprocess, 3.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cell phone, 1 scissors, 3.3ms +Speed: 3.1ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 cell phone, 2.6ms +Speed: 2.9ms preprocess, 2.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 cell phone, 1 scissors, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 laptop, 1 cell phone, 2.5ms +Speed: 2.5ms 
preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 laptop, 1 cell phone, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 cell phone, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 laptop, 1 cell phone, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 cell phone, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 cell phone, 1 scissors, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 1 scissors, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 laptop, 1 cell phone, 1 scissors, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 cell phone, 1 scissors, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 cell phone, 1 scissors, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 laptop, 1 cell phone, 1 scissors, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 cell phone, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 cell phone, 1 scissors, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 laptop, 1 cell phone, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 laptop, 1 cell phone, 2.2ms +Speed: 2.2ms preprocess, 2.2ms 
inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 laptop, 1 cell phone, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 cell phone, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 cell phone, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 cell phone, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 cell phone, 2.6ms +Speed: 3.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 cell phone, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 cell phone, 1 scissors, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 cell phone, 1 scissors, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 1 cell phone, 1 scissors, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 1 cell phone, 1 scissors, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 tv, 1 cell phone, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 1 scissors, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 1 cell phone, 1 scissors, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 3.4ms +Speed: 2.3ms preprocess, 3.4ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.8ms +Speed: 2.5ms preprocess, 
2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.0ms +Speed: 2.9ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 scissors, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 scissors, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.8ms +Speed: 2.7ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 4.8ms +Speed: 3.5ms preprocess, 4.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 scissors, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 scissors, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 scissors, 2.4ms +Speed: 2.4ms preprocess, 2.4ms 
inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 5.2ms +Speed: 2.8ms preprocess, 5.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 laptops, 1 scissors, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 scissors, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 scissors, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 scissors, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 scissors, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 scissors, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2 laptops, 1 scissors, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 scissors, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.4ms +Speed: 
2.1ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 laptops, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 3.2ms +Speed: 2.3ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 scissors, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 laptops, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input 
+ +0: 384x640 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.8ms +Speed: 2.8ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.9ms +Speed: 2.5ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 3.4ms +Speed: 3.0ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 scissors, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 4.8ms +Speed: 3.3ms preprocess, 4.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 laptops, 3.2ms +Speed: 2.4ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 laptops, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 scissors, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.1ms +Speed: 2.5ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 laptops, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 1 scissors, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms 
inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 scissors, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 5.0ms +Speed: 2.1ms preprocess, 5.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 tv, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 skateboard, 1 laptop, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 tv, 1 laptop, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 3.0ms +Speed: 3.3ms preprocess, 3.0ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 3.3ms +Speed: 2.2ms preprocess, 3.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 3.1ms +Speed: 2.1ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 3.8ms +Speed: 2.3ms preprocess, 3.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 scissors, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 laptops, 1 scissors, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 1 scissors, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 cell phone, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 cell phone, 2.7ms +Speed: 2.9ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 cell phone, 3.0ms +Speed: 2.5ms preprocess, 3.0ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 suitcases, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 toilet, 2.2ms +Speed: 
2.1ms preprocess, 2.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 refrigerator, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 refrigerator, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 refrigerator, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 refrigerator, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 refrigerator, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 refrigerator, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 refrigerator, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 
640) +[object detection] received image input + +0: 384x640 1 refrigerator, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.7ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.7ms +Speed: 3.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.9ms +Speed: 2.5ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 tv, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 tv, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 tv, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 tv, 2.2ms +Speed: 2.4ms preprocess, 2.2ms 
inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 tv, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 tv, 1 laptop, 2.5ms +Speed: 3.0ms preprocess, 2.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.4ms +Speed: 2.9ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 1 tv, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bed, 1 tv, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.9ms +Speed: 2.8ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 tv, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 car, 1 tv, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 car, 1 tv, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 car, 1 tv, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 car, 1 refrigerator, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 car, 1 bowl, 1 tv, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 car, 1 tv, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 car, 1 tv, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image 
input
+
+0: 384x640 1 car, 1 chair, 1 tv, 2.1ms
+Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 car, 1 chair, 1 tv, 2.1ms
+Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 car, 1 tv, 2.1ms
+Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.4ms
+Speed: 2.5ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640)
+[... output continues with one block like the above for every received camera frame; the detected classes (person, chair, tv, laptop, cup, bottle, keyboard, mouse, remote, ...) and the millisecond timings vary from frame to frame ...]
+[object detection] received image input
+
+0: 384x640 1 cup, 2 tvs, 1 laptop, 2.7ms
+Speed: 2.3ms preprocess, 2.7ms inference, 0.6ms postprocess per image at
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 remote, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 keyboard, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 keyboard, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 3.0ms +Speed: 3.6ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 remote, 2.0ms +Speed: 3.5ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 2 tvs, 1 laptop, 1 mouse, 1 keyboard, 5.2ms +Speed: 3.3ms preprocess, 5.2ms inference, 1.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 5.1ms +Speed: 3.4ms preprocess, 5.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.4ms +Speed: 3.6ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 2 tvs, 1 laptop, 1 mouse, 1 keyboard, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 keyboard, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 keyboard, 2.8ms +Speed: 3.0ms preprocess, 2.8ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 keyboard, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2 laptops, 1 mouse, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2 laptops, 1 mouse, 1 keyboard, 5.5ms +Speed: 3.5ms preprocess, 5.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object 
detection] received image input + +0: 384x640 1 cup, 1 tv, 2 laptops, 1 mouse, 1 keyboard, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2 laptops, 1 mouse, 1 keyboard, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2 mouses, 1 keyboard, 3.5ms +Speed: 2.9ms preprocess, 3.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 tv, 1 laptop, 1 mouse, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 tv, 1 laptop, 1 mouse, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 3.4ms +Speed: 3.9ms preprocess, 3.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 2.9ms +Speed: 3.3ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 3.6ms +Speed: 2.4ms preprocess, 3.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2 laptops, 2.0ms +Speed: 2.5ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 3.5ms +Speed: 2.2ms preprocess, 3.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 tv, 1 laptop, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.8ms +Speed: 2.7ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image 
input + +0: 384x640 1 cup, 1 tv, 1 laptop, 3.7ms +Speed: 3.4ms preprocess, 3.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 tvs, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 tvs, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 mouse, 1 keyboard, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2 laptops, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 tv, 2 laptops, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 tv, 1 laptop, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 tv, 1 laptop, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 3.1ms +Speed: 2.8ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 tv, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 tv, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 tv, 1 laptop, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 tv, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.4ms postprocess per 
image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 3.2ms +Speed: 2.4ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 2 tvs, 1 laptop, 1 keyboard, 2.0ms +Speed: 3.4ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 remote, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 remote, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 2 tvs, 1 laptop, 1 mouse, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 mouse, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 3.1ms +Speed: 2.8ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 2 tvs, 1 laptop, 1 mouse, 2.6ms +Speed: 3.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 2 tvs, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.3ms +Speed: 3.6ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2 mouses, 1 remote, 1 keyboard, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2 laptops, 1 mouse, 1 remote, 1 keyboard, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2 mouses, 1 remote, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 mouse, 1 remote, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 cake, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 3.2ms +Speed: 4.2ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2 mouses, 1 remote, 1 keyboard, 3.2ms +Speed: 3.4ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2 mouses, 1 remote, 1 keyboard, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 1 cell phone, 2.1ms +Speed: 3.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 2 cakes, 1 tv, 1 laptop, 2 mouses, 1 keyboard, 3.3ms +Speed: 2.7ms preprocess, 3.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 5.6ms +Speed: 4.2ms preprocess, 5.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2 mouses, 1 remote, 1 keyboard, 3.0ms +Speed: 2.4ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 3 laptops, 2 mouses, 2 keyboards, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 3 laptops, 1 mouse, 2 keyboards, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2 laptops, 1 mouse, 1 keyboard, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2 laptops, 1 mouse, 1 remote, 2 keyboards, 3.1ms +Speed: 2.3ms preprocess, 3.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 2 keyboards, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 remote, 2 keyboards, 3.4ms +Speed: 4.2ms preprocess, 3.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 3.0ms +Speed: 3.4ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2 laptops, 1 mouse, 1 keyboard, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2 laptops, 2 mouses, 2 keyboards, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2 mouses, 2 keyboards, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 1 book, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 2 laptops, 1 mouse, 2 keyboards, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 
keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2 mouses, 1 remote, 2 keyboards, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 2 keyboards, 2.9ms +Speed: 3.2ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2 laptops, 1 keyboard, 2.9ms +Speed: 2.9ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 keyboard, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bottle, 1 tv, 1 laptop, 1 keyboard, 2.2ms +Speed: 2.9ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 keyboard, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 1 laptop, 1 keyboard, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 persons, 1 tv, 1 laptop, 1 remote, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 1 laptop, 2 keyboards, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 2 keyboards, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2 keyboards, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 tvs, 1 laptop, 1 keyboard, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 tvs, 1 laptop, 1 keyboard, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 tvs, 1 laptop, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 tvs, 1 laptop, 1 keyboard, 2.1ms +Speed: 3.0ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 keyboard, 2.2ms 
+Speed: 2.3ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 keyboard, 3.1ms +Speed: 3.3ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 2 keyboards, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 keyboard, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 bottle, 1 tv, 1 laptop, 1 keyboard, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 keyboard, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 keyboard, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 keyboard, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 keyboard, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 4.9ms +Speed: 2.9ms preprocess, 4.9ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 keyboard, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 keyboard, 3.3ms +Speed: 2.3ms 
preprocess, 3.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 keyboard, 5.0ms +Speed: 3.6ms preprocess, 5.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 1 laptop, 1 keyboard, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 remote, 1 keyboard, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 keyboard, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 keyboard, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 keyboard, 5.0ms +Speed: 3.3ms preprocess, 5.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 1 laptop, 1 keyboard, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 keyboard, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 remote, 1 keyboard, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 remote, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape 
(1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 remote, 1 keyboard, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 1 laptop, 1 remote, 1 keyboard, 2.9ms +Speed: 2.7ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 keyboard, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 remote, 2 keyboards, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 2 keyboards, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 remote, 2 keyboards, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 3.2ms +Speed: 2.3ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 mouse, 1 keyboard, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 1 remote, 2 keyboards, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 cup, 1 tv, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 3.0ms +Speed: 2.4ms preprocess, 3.0ms inference, 
1.3ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 bottle, 1 tv, 1 laptop, 1 mouse, 2 keyboards, 2.4ms
+Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 2 tvs, 1 laptop, 1 mouse, 2 keyboards, 2.5ms
+Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[... further per-frame object-detection log entries of the same form, with the detected classes varying across frames (tv, laptop, keyboard, mouse, cup, chair, dining table, remote, person, cat, bed, or no detections) and preprocess/inference/postprocess timings of roughly 2-5 ms each at shape (1, 3, 384, 640) ...]
+[object detection] received image input
+
+0: 384x640 (no detections), 3.7ms
+Speed: 2.3ms preprocess, 3.7ms inference, 0.5ms postprocess
per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 2.9ms preprocess, 5.0ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 persons, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.2ms +Speed: 2.9ms preprocess, 4.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 persons, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.5ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.6ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.1ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.9ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.1ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 
384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 bed, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 persons, 1 chair, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per 
image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 5.1ms +Speed: 2.2ms preprocess, 5.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.9ms +Speed: 3.1ms preprocess, 3.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.2ms +Speed: 2.8ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.9ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.1ms +Speed: 2.7ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 2.4ms preprocess, 4.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.3ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 
person, 1 chair, 4.6ms +Speed: 2.3ms preprocess, 4.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.8ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.4ms +Speed: 2.2ms 
preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.0ms +Speed: 2.1ms preprocess, 4.0ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.0ms preprocess, 2.8ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.8ms preprocess, 2.8ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 3.1ms +Speed: 3.0ms preprocess, 3.1ms inference, 1.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 2.6ms preprocess, 5.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.9ms +Speed: 2.7ms preprocess, 2.9ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.9ms +Speed: 2.5ms preprocess, 2.9ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms 
+Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 5.0ms +Speed: 3.3ms preprocess, 5.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 4.9ms +Speed: 3.1ms preprocess, 4.9ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.0ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 2.0ms +Speed: 3.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.9ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 3.5ms +Speed: 3.3ms preprocess, 3.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.6ms preprocess, 2.9ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.9ms +Speed: 2.9ms preprocess, 3.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.3ms preprocess, 3.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.5ms preprocess, 2.9ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tie, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tie, 2.6ms +Speed: 2.7ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tie, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.0ms +Speed: 3.1ms preprocess, 4.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 3.6ms preprocess, 4.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.4ms +Speed: 3.5ms preprocess, 4.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.8ms preprocess, 2.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.8ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.6ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.7ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.9ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.2ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms 
+Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.2ms
+Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 2 persons, 2.4ms
+Speed: 2.3ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 cell phone, 2.5ms
+Speed: 2.5ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 2 keyboards, 1 book, 2.3ms
+Speed: 2.5ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 (no detections), 2.4ms
+Speed: 2.2ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 airplane, 2.2ms
+Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.2ms
+Speed: 2.2ms preprocess, 2.2ms
inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 refrigerator, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 3.5ms +Speed: 4.3ms preprocess, 3.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 refrigerator, 2.4ms +Speed: 3.3ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.4ms +Speed: 3.0ms preprocess, 2.4ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.3ms +Speed: 3.3ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 book, 2.8ms +Speed: 2.7ms preprocess, 2.8ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 2.8ms +Speed: 3.5ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 tv, 1 laptop, 1 refrigerator, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 refrigerator, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 refrigerator, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 refrigerator, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 5.0ms +Speed: 2.2ms preprocess, 5.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 refrigerator, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 scissors, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 remote, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2.5ms +Speed: 2.0ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 
1 tv, 1 laptop, 1 remote, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 1 laptop, 1 remote, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2 keyboards, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 1 keyboard, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 1 keyboard, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 dining table, 1 laptop, 1 remote, 1 keyboard, 2.2ms +Speed: 2.9ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2 keyboards, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 1 keyboard, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 1 keyboard, 1 book, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2 keyboards, 1 book, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2 keyboards, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2 remotes, 1 keyboard, 2.4ms +Speed: 2.9ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2 remotes, 1 keyboard, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2 remotes, 2 keyboards, 1 book, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2 keyboards, 1 book, 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 laptop, 2 remotes, 2 keyboards, 1 book, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2 keyboards, 1 book, 3.5ms +Speed: 3.4ms preprocess, 3.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 1 keyboard, 1 book, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 1 keyboard, 1 book, 3.8ms +Speed: 3.4ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 1 keyboard, 1 book, 2.5ms +Speed: 2.9ms preprocess, 2.5ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 1 keyboard, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 remote, 1 keyboard, 1 book, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 remote, 1 keyboard, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 dining table, 1 remote, 1 keyboard, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 dining table, 1 remote, 1 keyboard, 4.5ms +Speed: 2.3ms preprocess, 4.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 dining table, 1 remote, 1 keyboard, 2.1ms +Speed: 3.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 dining table, 1 remote, 1 keyboard, 4.0ms +Speed: 2.9ms preprocess, 4.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 dining table, 1 remote, 2 keyboards, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 remote, 2 keyboards, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 remote, 2 keyboards, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 remote, 2 keyboards, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 remote, 2 keyboards, 2.1ms +Speed: 2.1ms preprocess, 
2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 remote, 2 keyboards, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 remote, 2 keyboards, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 remote, 2 keyboards, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 remote, 2 keyboards, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 remote, 2 keyboards, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 chairs, 1 remote, 2 keyboards, 4.0ms +Speed: 3.2ms preprocess, 4.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 remote, 2 keyboards, 1 book, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 remote, 2 keyboards, 1 book, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2 keyboards, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 dining table, 1 laptop, 1 remote, 2 keyboards, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 1.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 remote, 3.8ms +Speed: 3.5ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 remote, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.8ms +Speed: 2.7ms preprocess, 3.8ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 chair, 1 tv, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 chair, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tv, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.2ms +Speed: 3.1ms preprocess, 3.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 keyboard, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2 keyboards, 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2 keyboards, 3.8ms +Speed: 2.6ms preprocess, 3.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2 keyboards, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2 keyboards, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2 keyboards, 2.3ms +Speed: 
2.1ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2 keyboards, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 keyboards, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 keyboard, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 keyboard, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 laptops, 1 keyboard, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 3.3ms +Speed: 2.1ms preprocess, 3.3ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 keyboards, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 keyboards, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object 
detection] received image input + +0: 384x640 (no detections), 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 bird, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 airplane, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 airplane, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 airplane, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 airplane, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 airplane, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 airplane, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.3ms +Speed: 2.9ms preprocess, 
3.3ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 1 remote, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 1 remote, 1 keyboard, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 1 remote, 1 keyboard, 1 book, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 laptop, 1 remote, 1 keyboard, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 1 remote, 1 keyboard, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 1 mouse, 1 remote, 1 keyboard, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 laptops, 1 mouse, 1 remote, 1 keyboard, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 laptops, 1 remote, 1 keyboard, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 laptop, 1 remote, 1 keyboard, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cup, 1 laptop, 1 remote, 1 keyboard, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 laptop, 1 remote, 1 keyboard, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image 
input + +0: 384x640 1 cup, 1 laptop, 1 remote, 1 keyboard, 3.3ms +Speed: 2.2ms preprocess, 3.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 1 keyboard, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 remote, 2 keyboards, 2.9ms +Speed: 2.7ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 laptop, 1 keyboard, 2.3ms +Speed: 3.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 1 toothbrush, 2.9ms +Speed: 2.0ms preprocess, 2.9ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 scissors, 1 toothbrush, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 5.0ms +Speed: 3.3ms preprocess, 5.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 airplane, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.9ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 pizza, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 pizza, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 keyboards, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 keyboards, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 keyboards, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 keyboards, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 
keyboards, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 keyboard, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 tie, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 vase, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 3.1ms +Speed: 3.1ms preprocess, 3.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 
384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 cup, 1 book, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 (no detections), 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 persons, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 persons, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 persons, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.9ms 
preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 persons, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.6ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 
2.4ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.2ms preprocess, 2.8ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.4ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 2.5ms preprocess, 3.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + 
+0: 384x640 1 person, 1 chair, 2.4ms
+Speed: 2.3ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 chair, 2.4ms
+Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 5.0ms
+Speed: 3.4ms preprocess, 5.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
[... several hundred similar entries omitted: each received frame produces one "[object detection] received image input" line plus a YOLO detection summary ("1 person", often with "1 chair", occasionally "1 traffic light") and a Speed line, with per-stage timings of roughly 2-5 ms at shape (1, 3, 384, 640) ...]
+
+0: 384x640 1 person, 1 traffic light, 2.3ms
+Speed: 2.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object
detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.0ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.7ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image 
input + +0: 384x640 1 person, 2.7ms +Speed: 3.0ms preprocess, 2.7ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 2.7ms preprocess, 4.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + 
+0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 5.0ms +Speed: 3.4ms preprocess, 5.0ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 4.9ms +Speed: 2.3ms preprocess, 4.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.0ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 3.8ms +Speed: 2.4ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 
person, 1 cell phone, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 3.3ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 4.6ms +Speed: 2.1ms preprocess, 4.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 2.1ms preprocess, 4.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 
person, 3.4ms +Speed: 2.3ms preprocess, 3.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 tv, 5.0ms +Speed: 2.8ms preprocess, 5.0ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 5.0ms +Speed: 3.3ms preprocess, 5.0ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 4.4ms +Speed: 4.3ms preprocess, 4.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 tv, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 tv, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 2 persons, 1 tv, 3.6ms +Speed: 3.1ms preprocess, 3.6ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 3 persons, 1 chair, 3.0ms +Speed: 3.3ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.3ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 
640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.1ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 
640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.2ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 3.0ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.9ms +Speed: 3.0ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.9ms +Speed: 3.2ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 3.1ms +Speed: 3.5ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.5ms preprocess, 2.0ms inference, 0.5ms postprocess 
per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 3.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 surfboard, 3.1ms +Speed: 3.3ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 5.0ms +Speed: 3.3ms preprocess, 5.0ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 umbrella, 1 baseball bat, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 umbrella, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 3.5ms +Speed: 3.3ms preprocess, 3.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms 
+Speed: 2.1ms preprocess, 2.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 surfboard, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.9ms +Speed: 3.3ms preprocess, 3.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.2ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 3.2ms +Speed: 2.1ms preprocess, 3.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.1ms preprocess, 3.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.0ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 baseball bats, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 baseball bat, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.2ms +Speed: 2.7ms preprocess, 5.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.2ms +Speed: 2.1ms preprocess, 4.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input
+
+0: 384x640 1 person, 2.2ms
+Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.2ms
+Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 baseball bat, 2.6ms
+Speed: 2.2ms preprocess, 2.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 cell phone, 2.3ms
+Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 chair, 2.3ms
+Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+[... the same per-frame "received image input" / inference log repeats for the remaining camera frames, occasionally also detecting a baseball bat, surfboard, cell phone, or chair ...]
+[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.0ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.0ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.7ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 2.8ms preprocess, 5.0ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.6ms +Speed: 3.8ms preprocess, 5.6ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.4ms +Speed: 3.4ms preprocess, 5.4ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.1ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.0ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.9ms +Speed: 2.3ms preprocess, 3.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.9ms +Speed: 2.5ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.2ms +Speed: 2.9ms preprocess, 3.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object 
detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.3ms preprocess, 3.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.9ms +Speed: 2.7ms preprocess, 3.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.9ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 5.0ms +Speed: 3.3ms preprocess, 5.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.8ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.9ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.3ms +Speed: 3.3ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 
0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.1ms preprocess, 3.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.2ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.7ms preprocess, 2.8ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 
0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.2ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.9ms preprocess, 3.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.2ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.8ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.7ms postprocess per 
image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 
640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.7ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.1ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.3ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.5ms preprocess, 3.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 3.1ms preprocess, 3.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.5ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.2ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.2ms +Speed: 3.3ms preprocess, 5.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 3.1ms preprocess, 4.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms 
+Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.1ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.7ms preprocess, 2.5ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.9ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.7ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.0ms +Speed: 2.6ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.4ms preprocess, 3.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.8ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.5ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 
person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.6ms preprocess, 2.7ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.7ms +Speed: 3.0ms preprocess, 5.7ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 2.3ms preprocess, 4.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.9ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.5ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 
2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 3.2ms preprocess, 4.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 4.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 2.2ms preprocess, 5.1ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.9ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms 
inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 1.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.1ms preprocess, 3.3ms inference, 1.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 5.0ms +Speed: 3.3ms preprocess, 5.0ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.0ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms 
+Speed: 2.2ms preprocess, 4.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.3ms
+Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 chair, 2.2ms
+Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 baseball bat, 2.1ms
+Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 cell phone, 2.2ms
+Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 laptop, 4.1ms
+Speed: 2.1ms preprocess, 4.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+... (several hundred similar per-frame log entries omitted: each frame reports its detections, mostly "1 person", occasionally joined by a chair, baseball bat, cell phone, or laptop, with roughly 2-5 ms preprocess/inference and under 2 ms postprocess at shape (1, 3, 384, 640)) ...
+
+[object detection] received image input
+
+0: 384x640 1 person, 2.3ms
+Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.4ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.7ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.1ms preprocess, 2.9ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.8ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.2ms preprocess, 3.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.7ms +Speed: 2.1ms preprocess, 4.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.1ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.2ms +Speed: 2.9ms preprocess, 4.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.2ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 2.9ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 3.0ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.6ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.8ms preprocess, 3.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.3ms +Speed: 2.6ms preprocess, 3.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.5ms +Speed: 2.1ms preprocess, 4.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 2.4ms preprocess, 4.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.6ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.8ms preprocess, 3.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms 
+Speed: 2.3ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.5ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 4.8ms preprocess, 3.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.9ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.5ms +Speed: 3.7ms preprocess, 5.5ms inference, 2.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.3ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.5ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 
2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.8ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 3.4ms preprocess, 2.9ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.3ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.3ms preprocess, 3.1ms inference, 0.8ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.8ms preprocess, 3.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.2ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 3.3ms preprocess, 5.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 3.2ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.2ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.6ms preprocess, 2.1ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.6ms preprocess, 3.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.5ms preprocess, 3.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 3.5ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.3ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.1ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.1ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.3ms preprocess, 3.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.2ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.8ms +Speed: 3.4ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms 
+Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 
2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.1ms
+Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.1ms
+Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 cell phone, 2.1ms
+Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
[... hundreds of further, nearly identical object-detection log entries omitted; each received frame reports 1 person, occasionally also 1 cell phone, at roughly 2-3 ms inference per 384x640 image ...]
+[object detection] received image input
+
+0:
384x640 1 person, 3.2ms +Speed: 2.8ms preprocess, 3.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.4ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 3.1ms preprocess, 3.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.7ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.4ms +Speed: 2.2ms preprocess, 4.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.3ms preprocess, 3.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.3ms preprocess, 3.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms 
+Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.9ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.2ms +Speed: 3.3ms preprocess, 5.2ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.6ms preprocess, 
2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 3.4ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.9ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.3ms preprocess, 3.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 2.6ms preprocess, 3.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.9ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.3ms preprocess, 3.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.3ms preprocess, 3.2ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.5ms preprocess, 3.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 3.6ms preprocess, 2.8ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.5ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 3.2ms +Speed: 2.5ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 4.0ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.6ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.6ms preprocess, 2.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.7ms +Speed: 3.4ms preprocess, 5.7ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.4ms preprocess, 3.6ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 5.0ms +Speed: 2.9ms preprocess, 5.0ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 3.2ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.7ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.6ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms 
+Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.7ms +Speed: 2.3ms preprocess, 2.7ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.8ms +Speed: 2.6ms preprocess, 3.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.1ms preprocess, 3.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.3ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 3.7ms +Speed: 2.1ms preprocess, 3.7ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.8ms preprocess, 2.5ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.4ms +Speed: 2.7ms preprocess, 2.4ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.4ms +Speed: 3.0ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 
3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.0ms +Speed: 3.8ms preprocess, 5.0ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 3.9ms +Speed: 2.1ms preprocess, 3.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 
1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.7ms +Speed: 3.2ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.0ms preprocess, 2.2ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 
0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 cell phones, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.0ms +Speed: 2.0ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 cell phones, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 1 cell phone, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 book, 4.8ms +Speed: 3.6ms preprocess, 4.8ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 book, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.9ms +Speed: 2.1ms preprocess, 2.9ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 1 remote, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 1 remote, 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.7ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 1 remote, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 1 remote, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 
1 person, 1 laptop, 2.2ms
+Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 laptop, 1 remote, 2.3ms
+Speed: 2.4ms preprocess, 2.3ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 laptop, 2.2ms
+Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 2.1ms
+Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 chair, 2.4ms
+Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 cell phone, 2.3ms
+Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 chair, 1 laptop, 2.1ms
+Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received image input
+
+0: 384x640 1 person, 1 chair, 1 laptop, 2.0ms
+Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640)
+[object detection] received
image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms 
inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object 
detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.6ms +Speed: 3.1ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.3ms +Speed: 2.7ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 
0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 5.5ms +Speed: 3.0ms preprocess, 5.5ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 4.6ms +Speed: 3.3ms preprocess, 4.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 3.7ms +Speed: 2.9ms preprocess, 3.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 
2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.6ms +Speed: 3.0ms preprocess, 2.6ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 
2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 laptop, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 laptop, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 tv, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 
2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 3.0ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.8ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image 
input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.4ms +Speed: 3.1ms preprocess, 3.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.7ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.2ms preprocess, 3.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.0ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 
person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.0ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.0ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.0ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 1 cell phone, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 4.5ms +Speed: 2.4ms preprocess, 4.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received 
image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.6ms +Speed: 2.7ms preprocess, 2.6ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.2ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.7ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2 cell phones, 2.7ms +Speed: 3.0ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2 cell phones, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 cell phone, 2.0ms +Speed: 2.1ms 
preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.6ms preprocess, 2.9ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.4ms +Speed: 3.6ms preprocess, 5.4ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.4ms preprocess, 2.8ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.3ms +Speed: 2.2ms preprocess, 4.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.2ms preprocess, 2.9ms 
inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.5ms +Speed: 2.3ms preprocess, 3.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.9ms +Speed: 2.6ms preprocess, 3.9ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.9ms +Speed: 2.3ms preprocess, 2.9ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.2ms +Speed: 2.1ms preprocess, 3.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 3.7ms +Speed: 2.3ms preprocess, 3.7ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.2ms preprocess, 3.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.5ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.1ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms 
inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.6ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.8ms preprocess, 2.2ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.4ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.6ms +Speed: 3.8ms preprocess, 5.6ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.5ms +Speed: 2.1ms preprocess, 4.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.1ms +Speed: 3.3ms preprocess, 5.1ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.7ms preprocess, 2.1ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.9ms preprocess, 2.3ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.8ms +Speed: 2.1ms preprocess, 2.8ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[... the same two-line log pattern repeats for every subsequent frame, always reporting 1 person detected at roughly 2-6 ms inference and under 2 ms postprocess at shape (1, 3, 384, 640) ...] +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess,
2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.6ms +Speed: 2.2ms preprocess, 3.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.1ms preprocess, 2.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.4ms preprocess, 2.6ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.6ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.0ms +Speed: 2.2ms preprocess, 4.0ms inference, 1.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.2ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms 
postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.2ms +Speed: 3.9ms preprocess, 5.2ms inference, 1.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.0ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.3ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at 
shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.7ms +Speed: 2.1ms preprocess, 3.7ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.5ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.2ms preprocess, 2.7ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.6ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) 
+[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.1ms +Speed: 2.1ms preprocess, 3.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 1.2ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.1ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.0ms +Speed: 2.1ms preprocess, 2.0ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.0ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.7ms preprocess, 2.5ms inference, 1.4ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.1ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] 
received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.1ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 5.5ms +Speed: 4.0ms preprocess, 5.5ms inference, 1.3ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.1ms +Speed: 3.9ms preprocess, 4.1ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.4ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.6ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.5ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.5ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 3.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.2ms +Speed: 3.6ms preprocess, 3.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 
384x640 1 person, 3.4ms +Speed: 2.9ms preprocess, 3.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.4ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 3.4ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.3ms preprocess, 2.3ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.2ms preprocess, 2.6ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.9ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.7ms +Speed: 2.1ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.1ms preprocess, 2.3ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.1ms +Speed: 2.2ms preprocess, 2.1ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 
3.1ms +Speed: 3.0ms preprocess, 3.1ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 2.3ms preprocess, 3.0ms inference, 1.0ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 3.0ms +Speed: 4.7ms preprocess, 3.0ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.9ms +Speed: 2.2ms preprocess, 4.9ms inference, 1.1ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.6ms preprocess, 2.5ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 4.8ms +Speed: 2.1ms preprocess, 4.8ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.1ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.8ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.9ms +Speed: 2.4ms preprocess, 2.9ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.6ms +Speed: 2.3ms preprocess, 2.6ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.4ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.5ms +Speed: 2.4ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.7ms +Speed: 2.5ms preprocess, 2.7ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.4ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.1ms +Speed: 2.3ms preprocess, 2.1ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.3ms preprocess, 2.4ms inference, 0.5ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.3ms preprocess, 2.2ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.2ms +Speed: 2.2ms preprocess, 2.2ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object 
detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 2.2ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.2ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.5ms +Speed: 2.2ms preprocess, 2.5ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.4ms +Speed: 3.0ms preprocess, 2.4ms inference, 0.6ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 1 chair, 2.4ms +Speed: 2.8ms preprocess, 2.4ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.0ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) +[object detection] received image input + +0: 384x640 1 person, 2.3ms +Speed: 3.1ms preprocess, 2.3ms inference, 0.7ms postprocess per image at shape (1, 3, 384, 640) diff --git a/examples/rerun-viewer/out/01902f4c-9833-7ff3-9725-0c0d105aff9c/log_rerun.txt b/examples/rerun-viewer/out/01902f4c-9833-7ff3-9725-0c0d105aff9c/log_rerun.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/examples/rerun-viewer/out/01902f4c-9833-7ff3-9725-0c0d105aff9c/log_webcam.txt b/examples/rerun-viewer/out/01902f4c-9833-7ff3-9725-0c0d105aff9c/log_webcam.txt new file mode 100644 index 0000000000000000000000000000000000000000..7d3188948b2eb90a98071969de13bc539e95cb75 --- /dev/null +++ b/examples/rerun-viewer/out/01902f4c-9833-7ff3-9725-0c0d105aff9c/log_webcam.txt @@ -0,0 +1,8190 @@ +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not 
correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame size is not correct +frame 
size is not correct diff --git a/examples/rerun-viewer/plot.py b/examples/rerun-viewer/plot.py new file mode 100644 index 0000000000000000000000000000000000000000..d6ec83893e1f4b4580bfa55803c61774167fcba4 --- /dev/null +++ b/examples/rerun-viewer/plot.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import os +from dora import Node +from dora
import DoraStatus + +import cv2 +import numpy as np + +CI = os.environ.get("CI") + +font = cv2.FONT_HERSHEY_SIMPLEX + +IMAGE_WIDTH = int(os.getenv("IMAGE_WIDTH", 960)) +IMAGE_HEIGHT = int(os.getenv("IMAGE_HEIGHT", 540)) + + +class Plotter: + """ + Plot image and bounding box + """ + + def __init__(self): + self.image = [] + self.bboxs = [] + + def on_input( + self, + dora_input, + ) -> DoraStatus: + """ + Put image and bounding box on cv2 window. + + Args: + dora_input["id"] (str): Id of the dora_input declared in the yaml configuration + dora_input["value"] (arrow array): message of the dora_input + """ + if dora_input["id"] == "image": + image = ( + dora_input["value"].to_numpy().reshape((IMAGE_HEIGHT, IMAGE_WIDTH, 3)) + ) + + image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + self.image = image + + elif dora_input["id"] == "bbox" and len(self.image) != 0: + bboxs = dora_input["value"].to_numpy() + self.bboxs = np.reshape(bboxs, (-1, 6)) + for bbox in self.bboxs: + [ + x, + y, + w, + h, + confidence, + label, + ] = bbox + cv2.rectangle( + self.image, + (int(x - w / 2), int(y - h / 2)), + (int(x + w / 2), int(y + h / 2)), + (0, 255, 0), + 2, + ) + + if CI != "true": + cv2.imshow("frame", self.image) + if cv2.waitKey(1) & 0xFF == ord("q"): + return DoraStatus.STOP + + return DoraStatus.CONTINUE + + +plotter = Plotter() +node = Node() + +for event in node: + event_type = event["type"] + if event_type == "INPUT": + status = plotter.on_input(event) + if status == DoraStatus.CONTINUE: + pass + elif status == DoraStatus.STOP: + print("plotter returned stop status") + break + elif event_type == "STOP": + print("received stop") + else: + print("received unexpected event:", event_type) diff --git a/examples/rerun-viewer/run.rs b/examples/rerun-viewer/run.rs new file mode 100644 index 0000000000000000000000000000000000000000..a14b553f0144e14a51cfb9577b89b15f7dd574ab --- /dev/null +++ b/examples/rerun-viewer/run.rs @@ -0,0 +1,102 @@ +use dora_core::{get_pip_path, get_python_path, run}; +use dora_download::download_file; +use dora_tracing::set_up_tracing; +use eyre::{bail, ContextCompat, WrapErr}; +use std::path::Path; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("python-dataflow-runner")?; + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + run( + get_python_path().context("Could not get python binary")?, + &["-m", "venv", "../.env"], + None, + ) + .await + .context("failed to create venv")?; + let venv = &root.join("examples").join(".env"); + std::env::set_var( + "VIRTUAL_ENV", + venv.to_str().context("venv path not valid unicode")?, + ); + let orig_path = std::env::var("PATH")?; + // bin folder is named Scripts on windows. + // 🤦‍♂️ See: https://github.com/pypa/virtualenv/commit/993ba1316a83b760370f5a3872b3f5ef4dd904c1 + let venv_bin = if cfg!(windows) { + venv.join("Scripts") + } else { + venv.join("bin") + }; + + if cfg!(windows) { + std::env::set_var( + "PATH", + format!( + "{};{orig_path}", + venv_bin.to_str().context("venv path not valid unicode")? + ), + ); + } else { + std::env::set_var( + "PATH", + format!( + "{}:{orig_path}", + venv_bin.to_str().context("venv path not valid unicode")? 
+ ), + ); + } + + run( + get_python_path().context("Could not get python binary")?, + &["-m", "pip", "install", "--upgrade", "pip"], + None, + ) + .await + .context("failed to install pip")?; + run( + get_pip_path().context("Could not get pip binary")?, + &["install", "-r", "requirements.txt"], + None, + ) + .await + .context("pip install failed")?; + + run( + "maturin", + &["develop"], + Some(&root.join("apis").join("python").join("node")), + ) + .await + .context("maturin develop failed")?; + download_file( + "https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt", + Path::new("yolov8n.pt"), + ) + .await + .context("Could not download weights.")?; + + let dataflow = Path::new("dataflow.yml"); + run_dataflow(dataflow).await?; + + Ok(()) +} + +async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--run-dataflow") + .arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} diff --git a/examples/rerun-viewer/webcam.py b/examples/rerun-viewer/webcam.py new file mode 100644 index 0000000000000000000000000000000000000000..33a7950dfebc0fa931d6d4bf3f65eb55d4c2a8b2 --- /dev/null +++ b/examples/rerun-viewer/webcam.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import os +import time +import numpy as np +import cv2 + +from dora import Node +import pyarrow as pa + +node = Node() + +IMAGE_INDEX = int(os.getenv("IMAGE_INDEX", 0)) +IMAGE_WIDTH = int(os.getenv("IMAGE_WIDTH", 960)) +IMAGE_HEIGHT = int(os.getenv("IMAGE_HEIGHT", 540)) +video_capture = cv2.VideoCapture(IMAGE_INDEX) +video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, IMAGE_WIDTH) +video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, IMAGE_HEIGHT) +font = cv2.FONT_HERSHEY_SIMPLEX + +start = time.time() + +# Run for at most 1000 seconds +while time.time() - start < 1000: + # Wait for the next dora input + event = node.next() + if event is None: + break + + event_type = event["type"] + if event_type == "INPUT": + ret, frame = video_capture.read() + if not ret: + frame = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH, 3), dtype=np.uint8) + cv2.putText( + frame, + "No Webcam was found at index %d" % (IMAGE_INDEX), + (int(30), int(30)), + font, + 0.75, + (255, 255, 255), + 2, + 1, + ) + if frame.size != IMAGE_HEIGHT * IMAGE_WIDTH * 3: + print("frame size is not correct") + frame = cv2.resize(frame, (IMAGE_WIDTH, IMAGE_HEIGHT)) + + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + node.send_output( + "image", + pa.array(frame.ravel()), + event["metadata"], + ) + node.send_output("text", pa.array([f"send image at: {time.time()}"])) diff --git a/examples/rust-dataflow-url/dataflow.yml b/examples/rust-dataflow-url/dataflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..b5777685dd4268dbd4e6acd479c4e0163562297a --- /dev/null +++ b/examples/rust-dataflow-url/dataflow.yml @@ -0,0 +1,23 @@ +nodes: + - id: rust-node + custom: + source: https://github.com/dora-rs/dora/releases/download/v0.0.0-test.4/rust-dataflow-example-node + inputs: + tick: dora/timer/millis/300 + outputs: + - random + - id: runtime-node + operators: + - id: rust-operator + shared-library: https://github.com/dora-rs/dora/releases/download/v0.0.0-test.4/librust_dataflow_example_operator.so + inputs: + tick: dora/timer/millis/100 + random: rust-node/random + outputs: + - status + - id: rust-sink + custom: + build:
cargo build -p rust-dataflow-example-sink + source: ../../target/debug/rust-dataflow-url-example-sink + inputs: + message: runtime-node/rust-operator/status diff --git a/examples/rust-dataflow-url/run.rs b/examples/rust-dataflow-url/run.rs new file mode 100644 index 0000000000000000000000000000000000000000..6f511970a8e448e3203e20145f1ac657d9d8f586 --- /dev/null +++ b/examples/rust-dataflow-url/run.rs @@ -0,0 +1,46 @@ +use dora_tracing::set_up_tracing; +use eyre::{bail, Context}; +use std::path::Path; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("rust-dataflow-url-runner").wrap_err("failed to set up tracing")?; + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + let dataflow = Path::new("dataflow.yml"); + build_dataflow(dataflow).await?; + + run_dataflow(dataflow).await?; + + Ok(()) +} + +async fn build_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--").arg("build").arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to build dataflow"); + }; + Ok(()) +} + +async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--run-dataflow") + .arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} diff --git a/examples/rust-dataflow-url/sink/Cargo.toml b/examples/rust-dataflow-url/sink/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..5ac556c647b79870f29a38f3b9d55fc8ca5de1d3 --- /dev/null +++ b/examples/rust-dataflow-url/sink/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "rust-dataflow-url-example-sink" +version.workspace = true +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +dora-node-api = { workspace = true, features = ["zenoh"] } +eyre = "0.6.8" diff --git a/examples/rust-dataflow-url/sink/src/main.rs b/examples/rust-dataflow-url/sink/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..f9c932a5266d6e081c86d68f847ce35747d7887a --- /dev/null +++ b/examples/rust-dataflow-url/sink/src/main.rs @@ -0,0 +1,28 @@ +use dora_node_api::{self, DoraNode}; +use eyre::{bail, Context}; + +fn main() -> eyre::Result<()> { + let mut operator = DoraNode::init_from_env()?; + + let inputs = operator.inputs()?; + + while let Ok(input) = inputs.recv() { + match input.id.as_str() { + "message" => { + let data = input.data(); + let received_string = + std::str::from_utf8(&data).wrap_err("received message was not utf8-encoded")?; + println!("received message: {}", received_string); + if !received_string.starts_with("operator received random value ") { + bail!("unexpected message format (should start with 'operator received random value')") + } + if !received_string.ends_with(" ticks") { + bail!("unexpected message format (should end with 'ticks')") + } + } + other => eprintln!("Ignoring unexpected input `{other}`"), + } + } + + Ok(()) +} diff --git a/examples/rust-dataflow/dataflow.yml b/examples/rust-dataflow/dataflow.yml new file mode 100644 index 
0000000000000000000000000000000000000000..30c01ced104452e8966e60a29f0d9878988dc740 --- /dev/null +++ b/examples/rust-dataflow/dataflow.yml @@ -0,0 +1,28 @@ +nodes: + - id: rust-node + build: cargo build -p rust-dataflow-example-node + path: ../../target/debug/rust-dataflow-example-node + inputs: + tick: dora/timer/millis/10 + outputs: + - random + - id: rust-status-node + custom: + build: cargo build -p rust-dataflow-example-status-node + source: ../../target/debug/rust-dataflow-example-status-node + inputs: + tick: dora/timer/millis/100 + random: rust-node/random + outputs: + - status + - id: rust-sink + build: cargo build -p rust-dataflow-example-sink + path: ../../target/debug/rust-dataflow-example-sink + inputs: + message: rust-status-node/status + - id: dora-record + build: cargo build -p dora-record + path: ../../target/debug/dora-record + inputs: + message: rust-status-node/status + random: rust-node/random diff --git a/examples/rust-dataflow/node/Cargo.toml b/examples/rust-dataflow/node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..6d123330c85e8ce6be2fd13f0d12f33b9fa66079 --- /dev/null +++ b/examples/rust-dataflow/node/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "rust-dataflow-example-node" +version.workspace = true +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +dora-node-api = { workspace = true, features = ["tracing"] } +eyre = "0.6.8" +futures = "0.3.21" +rand = "0.8.5" +tokio = { version = "1.24.2", features = ["rt", "macros"] } diff --git a/examples/rust-dataflow/node/src/main.rs b/examples/rust-dataflow/node/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..36f42d578bf821676320546b1fdcf67d00942b45 --- /dev/null +++ b/examples/rust-dataflow/node/src/main.rs @@ -0,0 +1,35 @@ +use dora_node_api::{self, dora_core::config::DataId, DoraNode, Event, IntoArrow}; + +fn main() -> eyre::Result<()> { + println!("hello"); + + let output = DataId::from("random".to_owned()); + + let (mut node, mut events) = DoraNode::init_from_env()?; + + for i in 0..100 { + let event = match events.recv() { + Some(input) => input, + None => break, + }; + + match event { + Event::Input { + id, + metadata, + data: _, + } => match id.as_str() { + "tick" => { + let random: u64 = rand::random(); + println!("tick {i}, sending {random:#x}"); + node.send_output(output.clone(), metadata.parameters, random.into_arrow())?; + } + other => eprintln!("Ignoring unexpected input `{other}`"), + }, + Event::Stop => println!("Received manual stop"), + other => eprintln!("Received unexpected input: {other:?}"), + } + } + + Ok(()) +} diff --git a/examples/rust-dataflow/run.rs b/examples/rust-dataflow/run.rs new file mode 100644 index 0000000000000000000000000000000000000000..f5e035a501efa620c5c5e88c579c6769bcaa07a1 --- /dev/null +++ b/examples/rust-dataflow/run.rs @@ -0,0 +1,46 @@ +use dora_tracing::set_up_tracing; +use eyre::{bail, Context}; +use std::path::Path; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("rust-dataflow-runner").wrap_err("failed to set up tracing subscriber")?; + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + let dataflow = Path::new("dataflow.yml"); + build_dataflow(dataflow).await?; + + run_dataflow(dataflow).await?; + + Ok(()) +} + +async fn build_dataflow(dataflow: &Path) -> eyre::Result<()> { + let 
cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--").arg("build").arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to build dataflow"); + }; + Ok(()) +} + +async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--run-dataflow") + .arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} diff --git a/examples/rust-dataflow/sink/Cargo.toml b/examples/rust-dataflow/sink/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..6be24136c4b8d946c2865d24291a438af45bcd85 --- /dev/null +++ b/examples/rust-dataflow/sink/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "rust-dataflow-example-sink" +version.workspace = true +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +dora-node-api = { workspace = true, features = ["tracing"] } +eyre = "0.6.8" diff --git a/examples/rust-dataflow/sink/src/main.rs b/examples/rust-dataflow/sink/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..e180af0816c21ea20277ab67bf94f33ea4c628df --- /dev/null +++ b/examples/rust-dataflow/sink/src/main.rs @@ -0,0 +1,38 @@ +use dora_node_api::{self, DoraNode, Event}; +use eyre::{bail, Context}; + +fn main() -> eyre::Result<()> { + let (_node, mut events) = DoraNode::init_from_env()?; + + while let Some(event) = events.recv() { + match event { + Event::Input { + id, + metadata: _, + data, + } => match id.as_str() { + "message" => { + let received_string: &str = + TryFrom::try_from(&data).context("expected string message")?; + println!("sink received message: {}", received_string); + if !received_string.starts_with("operator received random value ") { + bail!("unexpected message format (should start with 'operator received random value')") + } + if !received_string.ends_with(" ticks") { + bail!("unexpected message format (should end with 'ticks')") + } + } + other => eprintln!("Ignoring unexpected input `{other}`"), + }, + Event::Stop => { + println!("Received manual stop"); + } + Event::InputClosed { id } => { + println!("Input `{id}` was closed"); + } + other => eprintln!("Received unexpected input: {other:?}"), + } + } + + Ok(()) +} diff --git a/examples/rust-dataflow/status-node/Cargo.toml b/examples/rust-dataflow/status-node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..6af81b571706cd7dad0dd37d4f1ce847192d6b96 --- /dev/null +++ b/examples/rust-dataflow/status-node/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "rust-dataflow-example-status-node" +version.workspace = true +edition = "2021" +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + + +[dependencies] +dora-node-api = { workspace = true, features = ["tracing"] } +eyre = "0.6.8" diff --git a/examples/rust-dataflow/status-node/src/main.rs b/examples/rust-dataflow/status-node/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..09de8184c82c398a5a7d5d74d32ca8343ba34bf6 --- /dev/null +++ b/examples/rust-dataflow/status-node/src/main.rs @@ -0,0 +1,47 @@ +use dora_node_api::{self, dora_core::config::DataId, DoraNode, Event, 
IntoArrow}; +use eyre::Context; + +fn main() -> eyre::Result<()> { + println!("hello"); + + let status_output = DataId::from("status".to_owned()); + let (mut node, mut events) = DoraNode::init_from_env()?; + + let mut ticks = 0; + while let Some(event) = events.recv() { + match event { + Event::Input { id, metadata, data } => match id.as_ref() { + "tick" => { + ticks += 1; + } + "random" => { + let value = u64::try_from(&data).context("unexpected data type")?; + + let output = format!( + "operator received random value {value:#x} after {} ticks", + ticks + ); + node.send_output( + status_output.clone(), + metadata.parameters, + output.into_arrow(), + )?; + } + other => eprintln!("ignoring unexpected input {other}"), + }, + Event::Stop => {} + Event::InputClosed { id } => { + println!("input `{id}` was closed"); + if *id == "random" { + println!("`random` input was closed -> exiting"); + break; + } + } + other => { + println!("received unknown event {other:?}"); + } + } + } + + Ok(()) +} diff --git a/examples/rust-ros2-dataflow/README.md b/examples/rust-ros2-dataflow/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c30e52e6e2e03fef112334f02224125fe351ed34 --- /dev/null +++ b/examples/rust-ros2-dataflow/README.md @@ -0,0 +1,24 @@ +# `rust-ros2-dataflow` Example + +This example shows how to publish/subscribe to both ROS2 and Dora. The dataflow consists of a single node that sends random movement commands to the [ROS2 `turtlesim_node`](https://docs.ros.org/en/iron/Tutorials/Beginner-CLI-Tools/Introducing-Turtlesim/Introducing-Turtlesim.html). + +## Setup + +This example requires a sourced ROS2 installation. + +- To set up ROS2, follow the [ROS2 installation](https://docs.ros.org/en/iron/Installation.html) guide. +- Don't forget to [source the ROS2 setup files](https://docs.ros.org/en/iron/Tutorials/Beginner-CLI-Tools/Configuring-ROS2-Environment.html#source-the-setup-files) +- Follow tasks 1 and 2 of the [ROS2 turtlesim tutorial](https://docs.ros.org/en/iron/Tutorials/Beginner-CLI-Tools/Introducing-Turtlesim/Introducing-Turtlesim.html#id3) + - Install the turtlesim package + - Start the turtlesim node through `ros2 run turtlesim turtlesim_node` +- In a separate terminal, start the `/add_two_ints` service: `ros2 run examples_rclcpp_minimal_service service_main` + +## Running + +After sourcing the ROS2 installation and starting both the `turtlesim` node and the `/add_two_ints` service, you can run this example to move the turtle in random directions: + +``` +cargo run --example rust-ros2-dataflow --features ros2-examples +``` + +You should see a few random requests in the terminal where you started the `examples_rclcpp_minimal_service`.
diff --git a/examples/rust-ros2-dataflow/dataflow.yml b/examples/rust-ros2-dataflow/dataflow.yml new file mode 100644 index 0000000000000000000000000000000000000000..080f1f5d65b7636974c18c2588bd0f8f8dd06a21 --- /dev/null +++ b/examples/rust-ros2-dataflow/dataflow.yml @@ -0,0 +1,10 @@ +nodes: + - id: rust-node + custom: + build: cargo build -p rust-ros2-dataflow-example-node --features ros2 + source: ../../target/debug/rust-ros2-dataflow-example-node + inputs: + tick: dora/timer/millis/500 + service_timer: dora/timer/secs/1 + outputs: + - pose diff --git a/examples/rust-ros2-dataflow/node/Cargo.toml b/examples/rust-ros2-dataflow/node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..c984455eb18a63e76d6a016a21a3a8a4d0d7b304 --- /dev/null +++ b/examples/rust-ros2-dataflow/node/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "rust-ros2-dataflow-example-node" +version.workspace = true +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[features] +# enables binaries that depend on a sourced ROS2 installation +ros2 = [] + +[[bin]] +name = "rust-ros2-dataflow-example-node" +required-features = ["ros2"] + +[dependencies] +dora-node-api = { workspace = true, features = ["tracing"] } +eyre = "0.6.8" +futures = { version = "0.3.21", features = ["thread-pool"] } +futures-timer = "3.0.3" +rand = "0.8.5" +tokio = { version = "1.24.2", features = ["rt", "macros"] } +dora-ros2-bridge = { workspace = true } +serde_json = "1.0.99" diff --git a/examples/rust-ros2-dataflow/node/src/main.rs b/examples/rust-ros2-dataflow/node/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..395a5e344f9cbf247741fe62edff65e04501f37f --- /dev/null +++ b/examples/rust-ros2-dataflow/node/src/main.rs @@ -0,0 +1,227 @@ +use std::time::Duration; + +use dora_node_api::{ + self, + dora_core::config::DataId, + merged::{MergeExternal, MergedEvent}, + DoraNode, Event, +}; +use dora_ros2_bridge::{ + messages::{ + example_interfaces::service::{AddTwoInts, AddTwoIntsRequest}, + geometry_msgs::msg::{Twist, Vector3}, + turtlesim::msg::Pose, + }, + ros2_client::{self, ros2, NodeOptions}, + rustdds::{self, policy}, +}; +use eyre::{eyre, Context}; +use futures::task::SpawnExt; + +fn main() -> eyre::Result<()> { + let mut ros_node = init_ros_node()?; + let turtle_vel_publisher = create_vel_publisher(&mut ros_node)?; + let turtle_pose_reader = create_pose_reader(&mut ros_node)?; + + // spawn a background spinner task that handles service discovery (and other things) + let pool = futures::executor::ThreadPool::new()?; + let spinner = ros_node + .spinner() + .map_err(|e| eyre::eyre!("failed to create spinner: {e:?}"))?; + pool.spawn(async { + if let Err(err) = spinner.spin().await { + eprintln!("ros2 spinner failed: {err:?}"); + } + }) + .context("failed to spawn ros2 spinner")?; + + // create an example service client + let service_qos = { + rustdds::QosPolicyBuilder::new() + .reliability(policy::Reliability::Reliable { + max_blocking_time: rustdds::Duration::from_millis(100), + }) + .history(policy::History::KeepLast { depth: 1 }) + .build() + }; + let add_client = ros_node.create_client::<AddTwoInts>( + ros2_client::ServiceMapping::Enhanced, + &ros2_client::Name::new("/", "add_two_ints").unwrap(), + &ros2_client::ServiceTypeName::new("example_interfaces", "AddTwoInts"), + service_qos.clone(), + service_qos.clone(), + )?; + + // wait until the service server is ready + println!("wait for add_two_ints service"); + let service_ready
= async { + for _ in 0..10 { + let ready = add_client.wait_for_service(&ros_node); + futures::pin_mut!(ready); + let timeout = futures_timer::Delay::new(Duration::from_secs(2)); + match futures::future::select(ready, timeout).await { + futures::future::Either::Left(((), _)) => { + println!("add_two_ints service is ready"); + return Ok(()); + } + futures::future::Either::Right(_) => { + println!("timeout while waiting for add_two_ints service, retrying"); + } + } + } + eyre::bail!("add_two_ints service not available"); + }; + futures::executor::block_on(service_ready)?; + + let output = DataId::from("pose".to_owned()); + + let (mut node, dora_events) = DoraNode::init_from_env()?; + + let merged = dora_events.merge_external(Box::pin(turtle_pose_reader.async_stream())); + let mut events = futures::executor::block_on_stream(merged); + + for i in 0..1000 { + let event = match events.next() { + Some(input) => input, + None => break, + }; + + match event { + MergedEvent::Dora(event) => match event { + Event::Input { + id, + metadata: _, + data: _, + } => match id.as_str() { + "tick" => { + let direction = Twist { + linear: Vector3 { + x: rand::random::<f64>() + 1.0, + ..Default::default() + }, + angular: Vector3 { + z: (rand::random::<f64>() - 0.5) * 5.0, + ..Default::default() + }, + }; + println!("tick {i}, sending {direction:?}"); + turtle_vel_publisher.publish(direction).unwrap(); + } + "service_timer" => { + let a = rand::random(); + let b = rand::random(); + let service_result = add_two_ints_request(&add_client, a, b); + let sum = futures::executor::block_on(service_result) + .context("failed to send service request")?; + if sum != a.wrapping_add(b) { + eyre::bail!("unexpected addition result: expected {}, got {sum}", a + b) + } + } + other => eprintln!("Ignoring unexpected input `{other}`"), + }, + Event::Stop => println!("Received manual stop"), + other => eprintln!("Received unexpected input: {other:?}"), + }, + MergedEvent::External(pose) => { + println!("received pose event: {pose:?}"); + if let Ok((pose, _)) = pose { + let serialized = serde_json::to_string(&pose)?; + node.send_output_bytes( + output.clone(), + Default::default(), + serialized.len(), + serialized.as_bytes(), + )?; + } + } + } + } + + Ok(()) +} + +async fn add_two_ints_request( + add_client: &ros2_client::Client<AddTwoInts>, + a: i64, + b: i64, +) -> eyre::Result<i64> { + let request = AddTwoIntsRequest { a, b }; + println!("sending add request {request:?}"); + let request_id = add_client.async_send_request(request.clone()).await?; + println!("{request_id:?}"); + + let response = add_client.async_receive_response(request_id); + futures::pin_mut!(response); + let timeout = futures_timer::Delay::new(Duration::from_secs(15)); + match futures::future::select(response, timeout).await { + futures::future::Either::Left((Ok(response), _)) => { + println!("received response: {response:?}"); + Ok(response.sum) + } + futures::future::Either::Left((Err(err), _)) => eyre::bail!(err), + futures::future::Either::Right(_) => { + eyre::bail!("timeout while waiting for response"); + } + } +} + +fn init_ros_node() -> eyre::Result<ros2_client::Node> { + let ros_context = ros2_client::Context::new().unwrap(); + + ros_context + .new_node( + ros2_client::NodeName::new("/ros2_demo", "turtle_teleop") + .map_err(|e| eyre!("failed to create ROS2 node name: {e}"))?, + NodeOptions::new().enable_rosout(true), + ) + .map_err(|e| eyre::eyre!("failed to create ros2 node: {e:?}")) +} + +fn create_vel_publisher( + ros_node: &mut ros2_client::Node, +) -> eyre::Result<ros2_client::Publisher<Twist>> { + let topic_qos:
rustdds::QosPolicies = { + rustdds::QosPolicyBuilder::new() + .durability(policy::Durability::Volatile) + .liveliness(policy::Liveliness::Automatic { + lease_duration: ros2::Duration::INFINITE, + }) + .reliability(policy::Reliability::Reliable { + max_blocking_time: ros2::Duration::from_millis(100), + }) + .history(policy::History::KeepLast { depth: 1 }) + .build() + }; + + let turtle_cmd_vel_topic = ros_node + .create_topic( + &ros2_client::Name::new("/turtle1", "cmd_vel") + .map_err(|e| eyre!("failed to create ROS2 name: {e}"))?, + ros2_client::MessageTypeName::new("geometry_msgs", "Twist"), + &topic_qos, + ) + .context("failed to create topic")?; + + // The point here is to publish Twist for the turtle + let turtle_cmd_vel_writer = ros_node + .create_publisher::<Twist>(&turtle_cmd_vel_topic, None) + .context("failed to create publisher")?; + Ok(turtle_cmd_vel_writer) +} + +fn create_pose_reader( + ros_node: &mut ros2_client::Node, +) -> eyre::Result<ros2_client::Subscription<Pose>> { + let turtle_pose_topic = ros_node + .create_topic( + &ros2_client::Name::new("/turtle1", "pose") + .map_err(|e| eyre!("failed to create ROS2 name: {e}"))?, + ros2_client::MessageTypeName::new("turtlesim", "Pose"), + &Default::default(), + ) + .context("failed to create topic")?; + let turtle_pose_reader = ros_node + .create_subscription::<Pose>(&turtle_pose_topic, None) + .context("failed to create subscription")?; + Ok(turtle_pose_reader) +} diff --git a/examples/rust-ros2-dataflow/run.rs b/examples/rust-ros2-dataflow/run.rs new file mode 100644 index 0000000000000000000000000000000000000000..a14dce485da481788793d0b8c6add066e44ed158 --- /dev/null +++ b/examples/rust-ros2-dataflow/run.rs @@ -0,0 +1,46 @@ +use dora_tracing::set_up_tracing; +use eyre::{bail, Context}; +use std::path::Path; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + set_up_tracing("rust-ros2-dataflow-runner").wrap_err("failed to set up tracing subscriber")?; + + let root = Path::new(env!("CARGO_MANIFEST_DIR")); + std::env::set_current_dir(root.join(file!()).parent().unwrap()) + .wrap_err("failed to set working dir")?; + + let dataflow = Path::new("dataflow.yml"); + build_dataflow(dataflow).await?; + + run_dataflow(dataflow).await?; + + Ok(()) +} + +async fn build_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--").arg("build").arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to build dataflow"); + }; + Ok(()) +} + +async fn run_dataflow(dataflow: &Path) -> eyre::Result<()> { + let cargo = std::env::var("CARGO").unwrap(); + let mut cmd = tokio::process::Command::new(&cargo); + cmd.arg("run"); + cmd.arg("--package").arg("dora-cli"); + cmd.arg("--") + .arg("daemon") + .arg("--run-dataflow") + .arg(dataflow); + if !cmd.status().await?.success() { + bail!("failed to run dataflow"); + }; + Ok(()) +} diff --git a/libraries/arrow-convert/Cargo.toml b/libraries/arrow-convert/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..8307d72f97b810d0afa681b2a88cb4c7f19b2e90 --- /dev/null +++ b/libraries/arrow-convert/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "dora-arrow-convert" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +arrow = { workspace = true } +eyre =
"0.6.8" diff --git a/libraries/arrow-convert/src/from_impls.rs b/libraries/arrow-convert/src/from_impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..01e8a9518850bc8c361c88723683e0b8d7a197d1 --- /dev/null +++ b/libraries/arrow-convert/src/from_impls.rs @@ -0,0 +1,177 @@ +use arrow::{ + array::{Array, AsArray, PrimitiveArray, StringArray}, + datatypes::ArrowPrimitiveType, +}; +use eyre::ContextCompat; + +use crate::ArrowData; + +impl From for arrow::array::ArrayRef { + fn from(value: ArrowData) -> Self { + value.0 + } +} + +impl From for ArrowData { + fn from(value: arrow::array::ArrayRef) -> Self { + Self(value) + } +} + +impl TryFrom<&ArrowData> for bool { + type Error = eyre::Report; + fn try_from(value: &ArrowData) -> Result { + let bool_array = value.as_boolean_opt().context("not a bool array")?; + if bool_array.is_empty() { + eyre::bail!("empty array"); + } + if bool_array.len() != 1 { + eyre::bail!("expected length 1"); + } + if bool_array.null_count() != 0 { + eyre::bail!("bool array has nulls"); + } + Ok(bool_array.value(0)) + } +} +impl TryFrom<&ArrowData> for u8 { + type Error = eyre::Report; + fn try_from(value: &ArrowData) -> Result { + let array = value + .as_primitive_opt::() + .context("not a primitive UInt8Type array")?; + extract_single_primitive(array) + } +} +impl TryFrom<&ArrowData> for u16 { + type Error = eyre::Report; + fn try_from(value: &ArrowData) -> Result { + let array = value + .as_primitive_opt::() + .context("not a primitive UInt16Type array")?; + extract_single_primitive(array) + } +} +impl TryFrom<&ArrowData> for u32 { + type Error = eyre::Report; + fn try_from(value: &ArrowData) -> Result { + let array = value + .as_primitive_opt::() + .context("not a primitive UInt32Type array")?; + extract_single_primitive(array) + } +} +impl TryFrom<&ArrowData> for u64 { + type Error = eyre::Report; + fn try_from(value: &ArrowData) -> Result { + let array = value + .as_primitive_opt::() + .context("not a primitive UInt64Type array")?; + extract_single_primitive(array) + } +} +impl TryFrom<&ArrowData> for i8 { + type Error = eyre::Report; + fn try_from(value: &ArrowData) -> Result { + let array = value + .as_primitive_opt::() + .context("not a primitive Int8Type array")?; + extract_single_primitive(array) + } +} +impl TryFrom<&ArrowData> for i16 { + type Error = eyre::Report; + fn try_from(value: &ArrowData) -> Result { + let array = value + .as_primitive_opt::() + .context("not a primitive Int16Type array")?; + extract_single_primitive(array) + } +} +impl TryFrom<&ArrowData> for i32 { + type Error = eyre::Report; + fn try_from(value: &ArrowData) -> Result { + let array = value + .as_primitive_opt::() + .context("not a primitive Int32Type array")?; + extract_single_primitive(array) + } +} +impl TryFrom<&ArrowData> for i64 { + type Error = eyre::Report; + fn try_from(value: &ArrowData) -> Result { + let array = value + .as_primitive_opt::() + .context("not a primitive Int64Type array")?; + extract_single_primitive(array) + } +} + +impl<'a> TryFrom<&'a ArrowData> for &'a str { + type Error = eyre::Report; + fn try_from(value: &'a ArrowData) -> Result { + let array: &StringArray = value.as_string_opt().wrap_err("not a string array")?; + if array.is_empty() { + eyre::bail!("empty array"); + } + if array.len() != 1 { + eyre::bail!("expected length 1"); + } + if array.null_count() != 0 { + eyre::bail!("array has nulls"); + } + Ok(array.value(0)) + } +} + +impl<'a> TryFrom<&'a ArrowData> for &'a [u8] { + type Error = eyre::Report; + fn try_from(value: 
&'a ArrowData) -> Result<Self, Self::Error> { + let array: &PrimitiveArray<arrow::datatypes::UInt8Type> = value + .as_primitive_opt() + .wrap_err("not a primitive UInt8Type array")?; + if array.null_count() != 0 { + eyre::bail!("array has nulls"); + } + Ok(array.values()) + } +} + +impl<'a> TryFrom<&'a ArrowData> for Vec<u8> { + type Error = eyre::Report; + fn try_from(value: &'a ArrowData) -> Result<Self, Self::Error> { + value.try_into().map(|slice: &'a [u8]| slice.to_vec()) + } +} + +fn extract_single_primitive<T>(array: &PrimitiveArray<T>) -> Result<T::Native, eyre::Report> +where + T: ArrowPrimitiveType, +{ + if array.is_empty() { + eyre::bail!("empty array"); + } + if array.len() != 1 { + eyre::bail!("expected length 1"); + } + if array.null_count() != 0 { + eyre::bail!("array has nulls"); + } + Ok(array.value(0)) +} + +#[cfg(test)] +mod tests { + use arrow::array::{make_array, PrimitiveArray}; + + use crate::ArrowData; + + #[test] + fn test_u8() { + let array = + make_array(PrimitiveArray::<arrow::datatypes::UInt8Type>::from(vec![42]).into()); + let data: ArrowData = array.into(); + let value: u8 = (&data).try_into().unwrap(); + assert_eq!(value, 42); + } +} diff --git a/libraries/arrow-convert/src/into_impls.rs b/libraries/arrow-convert/src/into_impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..030d1b477c2859285aaa7931778d261f733880b5 --- /dev/null +++ b/libraries/arrow-convert/src/into_impls.rs @@ -0,0 +1,147 @@ +use arrow::array::{PrimitiveArray, StringArray}; + +use crate::IntoArrow; + +impl IntoArrow for bool { + type A = arrow::array::BooleanArray; + fn into_arrow(self) -> Self::A { + std::iter::once(Some(self)).collect() + } +} + +impl IntoArrow for u8 { + type A = PrimitiveArray<arrow::datatypes::UInt8Type>; + fn into_arrow(self) -> Self::A { + std::iter::once(self).collect() + } +} +impl IntoArrow for u16 { + type A = PrimitiveArray<arrow::datatypes::UInt16Type>; + fn into_arrow(self) -> Self::A { + std::iter::once(self).collect() + } +} +impl IntoArrow for u32 { + type A = PrimitiveArray<arrow::datatypes::UInt32Type>; + fn into_arrow(self) -> Self::A { + std::iter::once(self).collect() + } +} +impl IntoArrow for u64 { + type A = PrimitiveArray<arrow::datatypes::UInt64Type>; + fn into_arrow(self) -> Self::A { + std::iter::once(self).collect() + } +} +impl IntoArrow for i8 { + type A = PrimitiveArray<arrow::datatypes::Int8Type>; + fn into_arrow(self) -> Self::A { + std::iter::once(self).collect() + } +} +impl IntoArrow for i16 { + type A = PrimitiveArray<arrow::datatypes::Int16Type>; + fn into_arrow(self) -> Self::A { + std::iter::once(self).collect() + } +} +impl IntoArrow for i32 { + type A = PrimitiveArray<arrow::datatypes::Int32Type>; + fn into_arrow(self) -> Self::A { + std::iter::once(self).collect() + } +} +impl IntoArrow for i64 { + type A = PrimitiveArray<arrow::datatypes::Int64Type>; + fn into_arrow(self) -> Self::A { + std::iter::once(self).collect() + } +} +impl IntoArrow for f32 { + type A = PrimitiveArray<arrow::datatypes::Float32Type>; + fn into_arrow(self) -> Self::A { + std::iter::once(self).collect() + } +} +impl IntoArrow for f64 { + type A = PrimitiveArray<arrow::datatypes::Float64Type>; + fn into_arrow(self) -> Self::A { + std::iter::once(self).collect() + } +} + +impl IntoArrow for &str { + type A = StringArray; + fn into_arrow(self) -> Self::A { + std::iter::once(Some(self)).collect() + } +} + +impl IntoArrow for Vec<u8> { + type A = PrimitiveArray<arrow::datatypes::UInt8Type>; + fn into_arrow(self) -> Self::A { + self.into() + } +} +impl IntoArrow for Vec<u16> { + type A = PrimitiveArray<arrow::datatypes::UInt16Type>; + fn into_arrow(self) -> Self::A { + self.into() + } +} +impl IntoArrow for Vec<u32> { + type A = PrimitiveArray<arrow::datatypes::UInt32Type>; + fn into_arrow(self) -> Self::A { + self.into() + } +} +impl IntoArrow for Vec<u64> { + type A = PrimitiveArray<arrow::datatypes::UInt64Type>; + fn into_arrow(self) -> Self::A { + self.into() + } +} +impl IntoArrow for Vec<i8> { + type A = PrimitiveArray<arrow::datatypes::Int8Type>; + fn into_arrow(self) -> Self::A { + self.into() + } +} +impl IntoArrow
for Vec<i16> { + type A = PrimitiveArray<arrow::datatypes::Int16Type>; + fn into_arrow(self) -> Self::A { + self.into() + } +} +impl IntoArrow for Vec<i32> { + type A = PrimitiveArray<arrow::datatypes::Int32Type>; + fn into_arrow(self) -> Self::A { + self.into() + } +} +impl IntoArrow for Vec<i64> { + type A = PrimitiveArray<arrow::datatypes::Int64Type>; + fn into_arrow(self) -> Self::A { + self.into() + } +} +impl IntoArrow for Vec<f32> { + type A = PrimitiveArray<arrow::datatypes::Float32Type>; + fn into_arrow(self) -> Self::A { + self.into() + } +} +impl IntoArrow for Vec<f64> { + type A = PrimitiveArray<arrow::datatypes::Float64Type>; + fn into_arrow(self) -> Self::A { + self.into() + } +} + +impl IntoArrow for () { + type A = arrow::array::NullArray; + + fn into_arrow(self) -> Self::A { + arrow::array::NullArray::new(0) + } +} diff --git a/libraries/arrow-convert/src/lib.rs b/libraries/arrow-convert/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..826d456343126d0b52cb332d5d2c11f611782328 --- /dev/null +++ b/libraries/arrow-convert/src/lib.rs @@ -0,0 +1,29 @@ +use std::ops::{Deref, DerefMut}; + +use arrow::array::Array; + +mod from_impls; +mod into_impls; + +pub trait IntoArrow { + type A: Array; + + fn into_arrow(self) -> Self::A; +} + +#[derive(Debug)] +pub struct ArrowData(pub arrow::array::ArrayRef); + +impl Deref for ArrowData { + type Target = arrow::array::ArrayRef; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for ArrowData { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} diff --git a/libraries/communication-layer/pub-sub/Cargo.toml b/libraries/communication-layer/pub-sub/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..735e364dca52c95d09a2934d6977eb07d56fa694 --- /dev/null +++ b/libraries/communication-layer/pub-sub/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "communication-layer-pub-sub" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +[features] +default = ["zenoh"] +zenoh = ["dep:zenoh"] + +[dependencies] +zenoh = { version = "0.7.0-rc", optional = true, features = ["transport_tcp"] } +flume = "0.10" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/libraries/communication-layer/pub-sub/src/lib.rs b/libraries/communication-layer/pub-sub/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..2d4822f156e6f77ee6b58b6f4e104832fedfc3dc --- /dev/null +++ b/libraries/communication-layer/pub-sub/src/lib.rs @@ -0,0 +1,87 @@ +#![warn(missing_docs)] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] + +//! Abstraction of various publisher/subscriber communication backends. +//! +//! Provides a [`CommunicationLayer`] trait as an abstraction for different publisher/subscriber +//! systems. The following set of backends are currently supported: +//! +//! - **[Zenoh](https://zenoh.io/):** The zenoh project implements a distributed +//! publisher/subscriber system with automated routing. To use zenoh, use the +//! [`ZenohCommunicationLayer`][zenoh::ZenohCommunicationLayer] struct. + +use std::borrow::Cow; + +#[cfg(feature = "zenoh")] +pub mod zenoh; + +type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>; + +/// Abstraction trait for different publisher/subscriber implementations. +pub trait CommunicationLayer: Send + Sync { + /// Creates a publisher for the given topic. + fn publisher(&mut self, topic: &str) -> Result<Box<dyn Publisher>, BoxError>; + + /// Subscribe to the given topic. + fn subscribe(&mut self, topic: &str) -> Result<Box<dyn Subscriber>, BoxError>; +} + +/// Allows publishing messages to subscribers.
+/// +/// The message is published to the topic that was used to create the publisher +/// (see [`CommunicationLayer::publisher`]). +pub trait Publisher: Send + Sync { + /// Prepare memory for publishing a message with the given length. + /// + /// This function makes it possible to construct messages without + /// any additional copying. The returned [`Sample`] is initialized + /// with zeros. + fn prepare(&self, len: usize) -> Result<Box<dyn PublishSample<'_>>, BoxError>; + + /// Clone this publisher, returning the clone as a + /// [trait object](https://doc.rust-lang.org/book/ch17-02-trait-objects.html). + fn dyn_clone(&self) -> Box<dyn Publisher>; + + /// Publishes the given message to subscribers. + /// + /// Depending on the backend, this method might need to copy the data, which can + /// decrease performance. To avoid this, the [`prepare`](Publisher::prepare) function + /// can be used to construct the message in-place. + fn publish(&self, data: &[u8]) -> Result<(), BoxError> { + let mut sample = self.prepare(data.len())?; + sample.as_mut_slice().copy_from_slice(data); + sample.publish()?; + Ok(()) + } +} + +/// A prepared message constructed by [`Publisher::prepare`]. + pub trait PublishSample<'a>: Send + Sync { + /// Gets a reference to the prepared message. + /// + /// Makes it possible to construct the message in-place. + fn as_mut_slice(&mut self) -> &mut [u8]; + + /// Publish this sample to subscribers. + /// + /// The sample is published to the topic that was used to create the corresponding publisher + /// (see [`CommunicationLayer::publisher`]). + fn publish(self: Box) -> Result<(), BoxError>; +} + +/// Allows receiving messages published on a topic. +pub trait Subscriber: Send + Sync { + /// Receives the next message. + /// + /// Blocks until the next message is available. + /// + /// Depending on the chosen communication backend, some messages might be dropped if + /// the publisher is faster than the subscriber. + fn recv(&mut self) -> Result<Option<Box<dyn ReceivedSample>>, BoxError>; +} + +/// A message received from the communication layer. +pub trait ReceivedSample: Send + Sync { + /// Access the message's data. + fn get(&self) -> Cow<[u8]>; +} diff --git a/libraries/communication-layer/pub-sub/src/zenoh.rs b/libraries/communication-layer/pub-sub/src/zenoh.rs new file mode 100644 index 0000000000000000000000000000000000000000..76be1a54ad14c9f8834c4170e7a95b49d67df33a --- /dev/null +++ b/libraries/communication-layer/pub-sub/src/zenoh.rs @@ -0,0 +1,135 @@ +//! Provides [`ZenohCommunicationLayer`] to communicate over `zenoh`. + +use super::{CommunicationLayer, Publisher, Subscriber}; +use crate::{BoxError, ReceivedSample}; +use std::{borrow::Cow, sync::Arc, time::Duration}; +use zenoh::{ + prelude::{sync::SyncResolve, Config, Priority, SessionDeclarations, SplitBuffer}, + publication::CongestionControl, +}; + +/// Allows communication over `zenoh`. +pub struct ZenohCommunicationLayer { + zenoh: Arc<zenoh::Session>, + topic_prefix: String, +} + +impl ZenohCommunicationLayer { + /// Initializes a new `zenoh` session with the given configuration. + /// + /// The `prefix` is added to all topic names when using the [`publisher`][Self::publisher] + /// and [`subscriber`][Self::subscribe] methods. Pass an empty string if no prefix is + /// desired. + pub fn init(config: Config, prefix: String) -> Result<Self, BoxError> { + let zenoh = ::zenoh::open(config) + .res_sync() + .map_err(BoxError::from)?
+ .into_arc(); + Ok(Self { + zenoh, + topic_prefix: prefix, + }) + } + + fn prefixed(&self, topic: &str) -> String { + format!("{}/{topic}", self.topic_prefix) + } +} + +impl CommunicationLayer for ZenohCommunicationLayer { + fn publisher(&mut self, topic: &str) -> Result<Box<dyn Publisher>, BoxError> { + let publisher = self + .zenoh + .declare_publisher(self.prefixed(topic)) + .congestion_control(CongestionControl::Block) + .priority(Priority::RealTime) + .res_sync() + .map_err(BoxError::from)?; + + Ok(Box::new(ZenohPublisher { publisher })) + } + + fn subscribe(&mut self, topic: &str) -> Result<Box<dyn Subscriber>, BoxError> { + let subscriber = self + .zenoh + .declare_subscriber(self.prefixed(topic)) + .reliable() + .res_sync() + .map_err(BoxError::from)?; + + Ok(Box::new(ZenohReceiver(subscriber))) + } +} + +impl Drop for ZenohCommunicationLayer { + fn drop(&mut self) { + // wait a bit before closing to ensure that remaining published + // messages are sent out + // + // TODO: create a minimal example to reproduce the dropped messages + // and report this issue in the zenoh repo + std::thread::sleep(Duration::from_secs_f32(2.0)); + } +} + +#[derive(Clone)] +struct ZenohPublisher { + publisher: zenoh::publication::Publisher<'static>, +} + +impl Publisher for ZenohPublisher { + fn prepare(&self, len: usize) -> Result<Box<dyn crate::PublishSample<'_>>, BoxError> { + Ok(Box::new(ZenohPublishSample { + sample: vec![0; len], + publisher: self.publisher.clone(), + })) + } + + fn dyn_clone(&self) -> Box<dyn Publisher> { + Box::new(self.clone()) + } +} + +#[derive(Clone)] +struct ZenohPublishSample { + sample: Vec<u8>, + publisher: zenoh::publication::Publisher<'static>, +} + +impl<'a> crate::PublishSample<'a> for ZenohPublishSample { + fn as_mut_slice(&mut self) -> &mut [u8] { + &mut self.sample + } + + fn publish(self: Box) -> Result<(), BoxError> { + self.publisher + .put(self.sample) + .res_sync() + .map_err(BoxError::from) + } +} + +struct ZenohReceiver( + zenoh::subscriber::Subscriber<'static, flume::Receiver<zenoh::sample::Sample>>, +); + +impl Subscriber for ZenohReceiver { + fn recv(&mut self) -> Result<Option<Box<dyn ReceivedSample>>, BoxError> { + match self.0.recv() { + Ok(sample) => Ok(Some(Box::new(ZenohReceivedSample { + sample: sample.value.payload, + }))), + Err(_) => Ok(None), + } + } +} + +struct ZenohReceivedSample { + sample: zenoh::buffers::ZBuf, +} + +impl ReceivedSample for ZenohReceivedSample { + fn get(&self) -> Cow<[u8]> { + self.sample.contiguous() + } +} diff --git a/libraries/communication-layer/request-reply/Cargo.toml b/libraries/communication-layer/request-reply/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..d7ec67967b5b307e9e935a7f1a598a959584a225 --- /dev/null +++ b/libraries/communication-layer/request-reply/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "communication-layer-request-reply" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/libraries/communication-layer/request-reply/src/lib.rs b/libraries/communication-layer/request-reply/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..83f12699fab3b52e9a8f57f4d219b4faa5fba382 --- /dev/null +++ b/libraries/communication-layer/request-reply/src/lib.rs @@ -0,0 +1,77 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] + +//! Abstraction of various request/reply communication backends. +//! +//! Provides a [`RequestReplyLayer`] trait as an abstraction for different request/reply +//! systems.
+//!
+//! TODO
+
+pub use tcp::*;
+
+mod tcp;
+
+/// Abstraction trait for different request/reply implementations.
+pub trait RequestReplyLayer: Send + Sync {
+    type Address;
+    type RequestData;
+    type ReplyData;
+    type Error;
+
+    #[allow(clippy::type_complexity)]
+    fn listen(
+        &mut self,
+        addr: Self::Address,
+    ) -> Result<
+        Box<
+            dyn Iterator<
+                Item = Result<
+                    Box<
+                        dyn ListenConnection<
+                            RequestData = Self::RequestData,
+                            ReplyData = Self::ReplyData,
+                            Error = Self::Error,
+                        >,
+                    >,
+                    Self::Error,
+                >,
+            >,
+        >,
+        Self::Error,
+    >;
+
+    #[allow(clippy::type_complexity)]
+    fn connect(
+        &mut self,
+        addr: Self::Address,
+    ) -> Result<
+        Box<
+            dyn RequestReplyConnection<
+                RequestData = Self::RequestData,
+                ReplyData = Self::ReplyData,
+                Error = Self::Error,
+            >,
+        >,
+        Self::Error,
+    >;
+}
+
+pub trait ListenConnection: Send + Sync {
+    type RequestData;
+    type ReplyData;
+    type Error;
+
+    #[allow(clippy::type_complexity)]
+    fn handle_next(
+        &mut self,
+        handler: Box<dyn FnOnce(Self::RequestData) -> Result<Self::ReplyData, Self::Error>>,
+    ) -> Result<(), Self::Error>;
+}
+
+pub trait RequestReplyConnection: Send + Sync {
+    type RequestData;
+    type ReplyData;
+    type Error;
+
+    fn request(&mut self, request: &Self::RequestData) -> Result<Self::ReplyData, Self::Error>;
+}
diff --git a/libraries/communication-layer/request-reply/src/tcp.rs b/libraries/communication-layer/request-reply/src/tcp.rs
new file mode 100644
index 0000000000000000000000000000000000000000..eea7c5e6c7cf033f71bbf08805fe7c2af0764717
--- /dev/null
+++ b/libraries/communication-layer/request-reply/src/tcp.rs
@@ -0,0 +1,162 @@
+use std::{
+    io::{Read, Write},
+    net::{SocketAddr, TcpListener, TcpStream},
+};
+
+use crate::{ListenConnection, RequestReplyConnection, RequestReplyLayer};
+
+pub type TcpRequestReplyConnection =
+    dyn RequestReplyConnection<RequestData = Vec<u8>, ReplyData = Vec<u8>, Error = std::io::Error>;
+
+pub struct TcpLayer {}
+
+impl TcpLayer {
+    pub fn new() -> Self {
+        Self {}
+    }
+}
+
+impl Default for TcpLayer {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl RequestReplyLayer for TcpLayer {
+    type Address = SocketAddr;
+    type RequestData = Vec<u8>;
+    type ReplyData = Vec<u8>;
+    type Error = std::io::Error;
+
+    fn listen(
+        &mut self,
+        addr: Self::Address,
+    ) -> Result<
+        Box<
+            dyn Iterator<
+                Item = Result<
+                    Box<
+                        dyn crate::ListenConnection<
+                            RequestData = Self::RequestData,
+                            ReplyData = Self::ReplyData,
+                            Error = Self::Error,
+                        >,
+                    >,
+                    Self::Error,
+                >,
+            >,
+        >,
+        Self::Error,
+    > {
+        let incoming: Box<dyn Iterator<Item = Result<_, _>>> = Box::new(
+            IntoIncoming {
+                listener: TcpListener::bind(addr)?,
+            }
+            .map(|r| {
+                r.map(|stream| {
+                    let connection: Box<
+                        dyn ListenConnection<
+                            RequestData = Self::RequestData,
+                            ReplyData = Self::ReplyData,
+                            Error = Self::Error,
+                        >,
+                    > = Box::new(TcpConnection { stream });
+                    connection
+                })
+            }),
+        );
+        Ok(incoming)
+    }
+
+    fn connect(
+        &mut self,
+        addr: Self::Address,
+    ) -> Result<
+        Box<
+            dyn crate::RequestReplyConnection<
+                RequestData = Self::RequestData,
+                ReplyData = Self::ReplyData,
+                Error = Self::Error,
+            >,
+        >,
+        Self::Error,
+    > {
+        TcpStream::connect(addr).map(|s| {
+            let connection: Box<
+                dyn RequestReplyConnection<
+                    RequestData = Self::RequestData,
+                    ReplyData = Self::ReplyData,
+                    Error = Self::Error,
+                >,
+            > = Box::new(TcpConnection { stream: s });
+            connection
+        })
+    }
+}
+
+struct TcpConnection {
+    stream: TcpStream,
+}
+
+impl ListenConnection for TcpConnection {
+    type RequestData = Vec<u8>;
+    type ReplyData = Vec<u8>;
+    type Error = std::io::Error;
+
+    fn handle_next(
+        &mut self,
+        handler: Box<dyn FnOnce(Self::RequestData) -> Result<Self::ReplyData, Self::Error>>,
+    ) -> Result<(), Self::Error> {
+        let request = self.receive()?;
+        let reply = handler(request)?;
+        self.send(&reply)?;
+        Ok(())
+    }
+}
+
+impl RequestReplyConnection for TcpConnection {
+    type RequestData = Vec<u8>;
+    type ReplyData = Vec<u8>;
+    type Error = std::io::Error;
+
+    fn request(&mut self, request: &Self::RequestData) -> Result<Self::ReplyData, Self::Error> {
+        self.send(request)?;
+        let reply = self.receive()?;
+
+        Ok(reply)
+    }
+}
+
+impl TcpConnection {
+    fn send(&mut self, request: &[u8]) -> std::io::Result<()> {
+        let len_raw = (request.len() as u64).to_le_bytes();
+        self.stream.write_all(&len_raw)?;
+        self.stream.write_all(request)?;
+        Ok(())
+    }
+
+    fn receive(&mut self) -> std::io::Result<Vec<u8>> {
+        let reply_len = {
+            let mut raw = [0; 8];
+            self.stream.read_exact(&mut raw)?;
+            u64::from_le_bytes(raw) as usize
+        };
+        let mut reply = vec![0; reply_len];
+        self.stream.read_exact(&mut reply)?;
+        Ok(reply)
+    }
+}
+
+// taken from `std::net::tcp` module (still unstable)
+pub struct IntoIncoming {
+    listener: TcpListener,
+}
+
+impl Iterator for IntoIncoming {
+    type Item = std::io::Result<TcpStream>;
+    fn next(&mut self) -> Option<std::io::Result<TcpStream>> {
+        Some(self.listener.accept().map(|p| p.0))
+    }
+}
+
+impl std::iter::FusedIterator for IntoIncoming {}
diff --git a/libraries/core/Cargo.toml b/libraries/core/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..a211d8d7cf13cccde6c745c72dbec6e69faedd8a
--- /dev/null
+++ b/libraries/core/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "dora-core"
+version.workspace = true
+edition = "2021"
+documentation.workspace = true
+description.workspace = true
+license.workspace = true
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+eyre = "0.6.8"
+serde = { version = "1.0.136", features = ["derive"] }
+serde_yaml = "0.9.11"
+once_cell = "1.13.0"
+which = "5.0.0"
+uuid = { version = "1.7", features = ["serde", "v7"] }
+dora-message = { workspace = true }
+tracing = "0.1"
+serde-with-expand-env = "1.1.0"
+tokio = { version = "1.24.1", features = ["fs", "process", "sync"] }
+aligned-vec = { version = "0.5.0", features = ["serde"] }
+schemars = "0.8.19"
+serde_json = "1.0.117"
diff --git a/libraries/core/README.md b/libraries/core/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..f581ef17807c0e79ae1b252da37f8ded5bb88140
--- /dev/null
+++ b/libraries/core/README.md
@@ -0,0 +1,31 @@
+# Core library for dora
+
+## Generating dora schema
+
+```bash
+cargo run -p dora-core --bin generate_schema
+```
+
+## VSCode YAML Dataflow Support
+
+We can pass the JSON Schema to the VSCode [`redhat.vscode-yaml`](https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml) extension to enable features such as:
+
+- Type validation
+- Suggestions
+- Documentation
+
+### Getting started
+
+1. Install [`redhat.vscode-yaml`](https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml)
+
+2. Open User Settings (JSON) in VSCode via the `ctrl` + `shift` + `p` command palette.
+
+3. Add the following:
+
+```json
+    "yaml.schemas": {
+        "https://raw.githubusercontent.com/dora-rs/dora/main/libraries/core/dora-schema.json": "/*"
+    },
+```
+
+And you should be set!
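+
+As a quick check, a minimal dataflow such as the following sketch should then get completion and validation from the schema (the node IDs, executable paths, and output names below are hypothetical placeholders, not files from this repository):
+
+```yaml
+nodes:
+  - id: example-source
+    path: ./target/release/example-source # hypothetical executable
+    outputs:
+      - counter
+  - id: example-sink
+    path: ./target/release/example-sink # hypothetical executable
+    inputs:
+      tick: dora/timer/millis/100
+      counter: example-source/counter
+```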
🔥 diff --git a/libraries/core/dora-schema.json b/libraries/core/dora-schema.json new file mode 100644 index 0000000000000000000000000000000000000000..84c68fd81e09876b9235682271daaaa96ebfe753 --- /dev/null +++ b/libraries/core/dora-schema.json @@ -0,0 +1,466 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "dora-rs specification", + "description": "Dataflow description", + "type": "object", + "required": [ + "nodes" + ], + "properties": { + "nodes": { + "type": "array", + "items": { + "$ref": "#/definitions/Node" + } + } + }, + "additionalProperties": true, + "definitions": { + "CustomNode": { + "type": "object", + "required": [ + "source" + ], + "properties": { + "args": { + "description": "Args for the executable.", + "type": [ + "string", + "null" + ] + }, + "build": { + "type": [ + "string", + "null" + ] + }, + "envs": { + "description": "Environment variables for the custom nodes\n\nDeprecated, use outer-level `env` field instead.", + "type": [ + "object", + "null" + ], + "additionalProperties": { + "$ref": "#/definitions/EnvValue" + } + }, + "inputs": { + "description": "Inputs for the nodes as a map from input ID to `node_id/output_id`.\n\ne.g.\n\ninputs:\n\nexample_input: example_node/example_output1", + "default": {}, + "type": "object", + "additionalProperties": true + }, + "outputs": { + "description": "List of output IDs.\n\ne.g.\n\noutputs:\n\n- output_1\n\n- output_2", + "default": [], + "type": "array", + "items": { + "$ref": "#/definitions/DataId" + }, + "uniqueItems": true + }, + "send_stdout_as": { + "description": "Send stdout and stderr to another node", + "type": [ + "string", + "null" + ] + }, + "source": { + "description": "Path of the source code\n\nIf you want to use a specific `conda` environment. Provide the python path within the source.\n\nsource: /home/peter/miniconda3/bin/python\n\nargs: some_node.py\n\nSource can match any executable in PATH.", + "type": "string" + } + } + }, + "DataId": { + "type": "string" + }, + "Duration": { + "type": "object", + "required": [ + "nanos", + "secs" + ], + "properties": { + "nanos": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "secs": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + } + }, + "EnvValue": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + { + "type": "string" + } + ] + }, + "Input": { + "type": "object", + "required": [ + "mapping" + ], + "properties": { + "mapping": { + "$ref": "#/definitions/InputMapping" + }, + "queue_size": { + "type": [ + "integer", + "null" + ], + "format": "uint", + "minimum": 0.0 + } + }, + "additionalProperties": true + }, + "InputMapping": { + "oneOf": [ + { + "type": "object", + "required": [ + "Timer" + ], + "properties": { + "Timer": { + "type": "object", + "required": [ + "interval" + ], + "properties": { + "interval": { + "$ref": "#/definitions/Duration" + } + } + } + }, + "additionalProperties": true + }, + { + "type": "object", + "required": [ + "User" + ], + "properties": { + "User": { + "$ref": "#/definitions/UserInputMapping" + } + }, + "additionalProperties": true + } + ] + }, + "Node": { + "description": "Dora Node", + "type": "object", + "required": [ + "id" + ], + "properties": { + "args": { + "type": [ + "string", + "null" + ] + }, + "build": { + "type": [ + "string", + "null" + ] + }, + "custom": { + "anyOf": [ + { + "$ref": "#/definitions/CustomNode" + }, + { + "type": "null" + } + ] + }, + "description": { + "description": "Description 
of the node", + "type": [ + "string", + "null" + ] + }, + "env": { + "description": "Environment variables", + "type": [ + "object", + "null" + ], + "additionalProperties": { + "$ref": "#/definitions/EnvValue" + } + }, + "id": { + "description": "Node identifier", + "allOf": [ + { + "$ref": "#/definitions/NodeId" + } + ] + }, + "inputs": { + "default": {}, + "type": "object", + "additionalProperties": true + }, + "name": { + "description": "Node name", + "type": [ + "string", + "null" + ] + }, + "operator": { + "anyOf": [ + { + "$ref": "#/definitions/SingleOperatorDefinition" + }, + { + "type": "null" + } + ] + }, + "operators": { + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/OperatorDefinition" + } + }, + "outputs": { + "default": [], + "type": "array", + "items": { + "$ref": "#/definitions/DataId" + }, + "uniqueItems": true + }, + "path": { + "type": [ + "string", + "null" + ] + }, + "send_stdout_as": { + "type": [ + "string", + "null" + ] + } + }, + "additionalProperties": true + }, + "NodeId": { + "type": "string" + }, + "OperatorDefinition": { + "type": "object", + "oneOf": [ + { + "type": "object", + "required": [ + "shared-library" + ], + "properties": { + "shared-library": { + "type": "string" + } + }, + "additionalProperties": true + }, + { + "type": "object", + "required": [ + "python" + ], + "properties": { + + }, + "additionalProperties": true + } + ], + "required": [ + "id" + ], + "properties": { + "build": { + "type": [ + "string", + "null" + ] + }, + "description": { + "type": [ + "string", + "null" + ] + }, + "id": { + "$ref": "#/definitions/OperatorId" + }, + "inputs": { + "default": {}, + "type": "object", + "additionalProperties": true + }, + "name": { + "type": [ + "string", + "null" + ] + }, + "outputs": { + "default": [], + "type": "array", + "items": { + "$ref": "#/definitions/DataId" + }, + "uniqueItems": true + }, + "send_stdout_as": { + "type": [ + "string", + "null" + ] + } + } + }, + "OperatorId": { + "type": "string" + }, + "PythonSource": { + "type": "object", + "required": [ + "source" + ], + "properties": { + "conda_env": { + "type": [ + "string", + "null" + ] + }, + "source": { + "type": "string" + } + }, + "additionalProperties": true + }, + "SingleOperatorDefinition": { + "type": "object", + "oneOf": [ + { + "type": "object", + "required": [ + "shared-library" + ], + "properties": { + "shared-library": { + "type": "string" + } + }, + "additionalProperties": true + }, + { + "type": "object", + "required": [ + "python" + ], + "properties": { + + }, + "additionalProperties": true + } + ], + "properties": { + "build": { + "type": [ + "string", + "null" + ] + }, + "description": { + "type": [ + "string", + "null" + ] + }, + "id": { + "description": "ID is optional if there is only a single operator.", + "anyOf": [ + { + "$ref": "#/definitions/OperatorId" + }, + { + "type": "null" + } + ] + }, + "inputs": { + "default": {}, + "type": "object", + "additionalProperties": true + }, + "name": { + "type": [ + "string", + "null" + ] + }, + "outputs": { + "default": [], + "type": "array", + "items": { + "$ref": "#/definitions/DataId" + }, + "uniqueItems": true + }, + "send_stdout_as": { + "type": [ + "string", + "null" + ] + } + } + }, + "UserInputMapping": { + "type": "object", + "required": [ + "output", + "source" + ], + "properties": { + "output": { + "$ref": "#/definitions/DataId" + }, + "source": { + "$ref": "#/definitions/NodeId" + } + } + } + } +} \ No newline at end of file diff --git a/libraries/core/src/bin/generate_schema.rs 
b/libraries/core/src/bin/generate_schema.rs new file mode 100644 index 0000000000000000000000000000000000000000..f2bda3e3f78f3f52bd48058e7607b3d5e2896626 --- /dev/null +++ b/libraries/core/src/bin/generate_schema.rs @@ -0,0 +1,42 @@ +use std::{env, path::Path}; + +use dora_core::descriptor::Descriptor; +use schemars::schema_for; + +fn main() -> () { + let schema = schema_for!(Descriptor); + let raw_schema = + serde_json::to_string_pretty(&schema).expect("Could not serialize schema to json"); + + // Add additional properties to True, as #[derive(transparent)] of enums are not well handled. + // + // 'OneOf' such as Custom Nodes, Operators and Single Operators overwrite property values of the initial struct `Nodes`.` + // which make the original properties such as `id` and `name` not validated by IDE extensions. + let raw_schema = raw_schema.replace( + "\"additionalProperties\": false", + "\"additionalProperties\": true", + ); + + // Remove `serde(from=` nested field as they are not handled properly by `schemars` + let raw_schema = raw_schema.replace( + "\"python\": { + \"$ref\": \"#/definitions/PythonSource\" + }", + "", + ); + let raw_schema = raw_schema.replace( + "{ + \"$ref\": \"#/definitions/Input\" + }", + "true", + ); + + // Get the Cargo root manifest directory + let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR is not set"); + + // Create the path for the new file next to Cargo.toml + let new_file_path = Path::new(&manifest_dir).join("dora-schema.json"); + + // write to file + std::fs::write(new_file_path, raw_schema).expect("Could not write schema to file"); +} diff --git a/libraries/core/src/config.rs b/libraries/core/src/config.rs new file mode 100644 index 0000000000000000000000000000000000000000..6b8f2ac15958b050f5288329f455fa97cceff1dc --- /dev/null +++ b/libraries/core/src/config.rs @@ -0,0 +1,374 @@ +use once_cell::sync::OnceCell; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use std::{ + borrow::Borrow, + collections::{BTreeMap, BTreeSet}, + convert::Infallible, + fmt, + str::FromStr, + time::Duration, +}; + +#[derive( + Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize, JsonSchema, +)] +pub struct NodeId(String); + +impl FromStr for NodeId { + type Err = Infallible; + + fn from_str(s: &str) -> Result { + Ok(Self(s.to_owned())) + } +} + +impl From for NodeId { + fn from(id: String) -> Self { + Self(id) + } +} + +impl std::fmt::Display for NodeId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(&self.0, f) + } +} + +impl AsRef for NodeId { + fn as_ref(&self) -> &str { + &self.0 + } +} + +#[derive( + Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize, JsonSchema, +)] +pub struct OperatorId(String); + +impl FromStr for OperatorId { + type Err = Infallible; + + fn from_str(s: &str) -> Result { + Ok(Self(s.to_owned())) + } +} + +impl From for OperatorId { + fn from(id: String) -> Self { + Self(id) + } +} + +impl std::fmt::Display for OperatorId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(&self.0, f) + } +} + +impl AsRef for OperatorId { + fn as_ref(&self) -> &str { + &self.0 + } +} + +#[derive( + Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize, JsonSchema, +)] +pub struct DataId(String); + +impl From for String { + fn from(id: DataId) -> Self { + id.0 + } +} + +impl From for DataId { + fn from(id: String) -> Self { + Self(id) + } +} + +impl 
std::fmt::Display for DataId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(&self.0, f) + } +} + +impl std::ops::Deref for DataId { + type Target = String; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl AsRef for DataId { + fn as_ref(&self) -> &String { + &self.0 + } +} + +impl AsRef for DataId { + fn as_ref(&self) -> &str { + &self.0 + } +} + +impl Borrow for DataId { + fn borrow(&self) -> &String { + &self.0 + } +} + +impl Borrow for DataId { + fn borrow(&self) -> &str { + &self.0 + } +} + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, JsonSchema)] +pub enum InputMapping { + Timer { interval: Duration }, + User(UserInputMapping), +} + +impl InputMapping { + pub fn source(&self) -> &NodeId { + static DORA_NODE_ID: OnceCell = OnceCell::new(); + + match self { + InputMapping::User(mapping) => &mapping.source, + InputMapping::Timer { .. } => DORA_NODE_ID.get_or_init(|| NodeId("dora".to_string())), + } + } +} + +impl fmt::Display for InputMapping { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + InputMapping::Timer { interval } => { + let duration = format_duration(*interval); + write!(f, "dora/timer/{duration}") + } + InputMapping::User(mapping) => { + write!(f, "{}/{}", mapping.source, mapping.output) + } + } + } +} + +impl Serialize for InputMapping { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.collect_str(self) + } +} + +impl<'de> Deserialize<'de> for InputMapping { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let string = String::deserialize(deserializer)?; + let (source, output) = string + .split_once('/') + .ok_or_else(|| serde::de::Error::custom("input must start with `/`"))?; + + let deserialized = match source { + "dora" => match output.split_once('/') { + Some(("timer", output)) => { + let (unit, value) = output.split_once('/').ok_or_else(|| { + serde::de::Error::custom( + "timer input must specify unit and value (e.g. 
`secs/5` or `millis/100`)", + ) + })?; + let interval = match unit { + "secs" => { + let value = value.parse().map_err(|_| { + serde::de::Error::custom(format!( + "secs must be an integer (got `{value}`)" + )) + })?; + Duration::from_secs(value) + } + "millis" => { + let value = value.parse().map_err(|_| { + serde::de::Error::custom(format!( + "millis must be an integer (got `{value}`)" + )) + })?; + Duration::from_millis(value) + } + other => { + return Err(serde::de::Error::custom(format!( + "timer unit must be either secs or millis (got `{other}`" + ))) + } + }; + Self::Timer { interval } + } + Some((other, _)) => { + return Err(serde::de::Error::custom(format!( + "unknown dora input `{other}`" + ))) + } + None => return Err(serde::de::Error::custom("dora input has invalid format")), + }, + _ => Self::User(UserInputMapping { + source: source.to_owned().into(), + output: output.to_owned().into(), + }), + }; + + Ok(deserialized) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, JsonSchema)] +pub struct UserInputMapping { + pub source: NodeId, + pub output: DataId, +} + +pub struct FormattedDuration(pub Duration); + +impl fmt::Display for FormattedDuration { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.0.subsec_millis() == 0 { + write!(f, "secs/{}", self.0.as_secs()) + } else { + write!(f, "millis/{}", self.0.as_millis()) + } + } +} + +pub fn format_duration(interval: Duration) -> FormattedDuration { + FormattedDuration(interval) +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +pub struct NodeRunConfig { + /// Inputs for the nodes as a map from input ID to `node_id/output_id`. + /// + /// e.g. + /// + /// inputs: + /// + /// example_input: example_node/example_output1 + /// + #[serde(default)] + pub inputs: BTreeMap, + /// List of output IDs. + /// + /// e.g. 
+ /// + /// outputs: + /// + /// - output_1 + /// + /// - output_2 + #[serde(default)] + pub outputs: BTreeSet, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[serde(deny_unknown_fields, from = "InputDef", into = "InputDef")] +pub struct Input { + pub mapping: InputMapping, + pub queue_size: Option, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum InputDef { + MappingOnly(InputMapping), + WithOptions { + source: InputMapping, + queue_size: Option, + }, +} + +impl From for InputDef { + fn from(input: Input) -> Self { + match input { + Input { + mapping, + queue_size: None, + } => Self::MappingOnly(mapping), + Input { + mapping, + queue_size, + } => Self::WithOptions { + source: mapping, + queue_size, + }, + } + } +} + +impl From for Input { + fn from(value: InputDef) -> Self { + match value { + InputDef::MappingOnly(mapping) => Self { + mapping, + queue_size: None, + }, + InputDef::WithOptions { source, queue_size } => Self { + mapping: source, + queue_size, + }, + } + } +} + +#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone)] +#[serde(deny_unknown_fields, rename_all = "lowercase")] +pub struct CommunicationConfig { + // see https://github.com/dtolnay/serde-yaml/issues/298 + #[serde( + default, + with = "serde_yaml::with::singleton_map", + rename = "_unstable_local" + )] + #[schemars(with = "String")] + pub local: LocalCommunicationConfig, + #[serde( + default, + with = "serde_yaml::with::singleton_map", + rename = "_unstable_remote" + )] + #[schemars(with = "String")] + pub remote: RemoteCommunicationConfig, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub enum LocalCommunicationConfig { + Tcp, + Shmem, +} + +impl Default for LocalCommunicationConfig { + fn default() -> Self { + Self::Tcp + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +#[serde(deny_unknown_fields, rename_all = "lowercase")] +pub enum RemoteCommunicationConfig { + Tcp, + // TODO:a + // Zenoh { + // config: Option, + // prefix: String, + // }, +} + +impl Default for RemoteCommunicationConfig { + fn default() -> Self { + Self::Tcp + } +} diff --git a/libraries/core/src/coordinator_messages.rs b/libraries/core/src/coordinator_messages.rs new file mode 100644 index 0000000000000000000000000000000000000000..38e9eae2ed0cfef86670426d881e303a007b0fdf --- /dev/null +++ b/libraries/core/src/coordinator_messages.rs @@ -0,0 +1,43 @@ +use crate::daemon_messages::DataflowId; +use eyre::eyre; + +#[derive(Debug, serde::Serialize, serde::Deserialize)] +pub enum CoordinatorRequest { + Register { + dora_version: String, + machine_id: String, + listen_port: u16, + }, + Event { + machine_id: String, + event: DaemonEvent, + }, +} + +#[derive(Debug, serde::Serialize, serde::Deserialize)] +pub enum DaemonEvent { + AllNodesReady { + dataflow_id: DataflowId, + success: bool, + }, + AllNodesFinished { + dataflow_id: DataflowId, + result: Result<(), String>, + }, + Heartbeat, +} + +#[derive(Debug, serde::Serialize, serde::Deserialize)] +pub enum RegisterResult { + Ok, + Err(String), +} + +impl RegisterResult { + pub fn to_result(self) -> eyre::Result<()> { + match self { + RegisterResult::Ok => Ok(()), + RegisterResult::Err(err) => Err(eyre!(err)), + } + } +} diff --git a/libraries/core/src/daemon_messages.rs b/libraries/core/src/daemon_messages.rs new file mode 100644 index 0000000000000000000000000000000000000000..91c634ccaafaf5aa3965f63e029edc6e1b95d66c --- /dev/null +++ 
b/libraries/core/src/daemon_messages.rs @@ -0,0 +1,269 @@ +use std::{ + collections::{BTreeMap, BTreeSet}, + fmt, + net::SocketAddr, + path::PathBuf, + time::Duration, +}; + +use crate::{ + config::{DataId, NodeId, NodeRunConfig, OperatorId}, + descriptor::{Descriptor, OperatorDefinition, ResolvedNode}, +}; +use aligned_vec::{AVec, ConstAlign}; +use dora_message::{uhlc, Metadata}; +use uuid::{NoContext, Timestamp, Uuid}; + +#[derive(Debug, serde::Serialize, serde::Deserialize)] +pub struct NodeConfig { + pub dataflow_id: DataflowId, + pub node_id: NodeId, + pub run_config: NodeRunConfig, + pub daemon_communication: DaemonCommunication, + pub dataflow_descriptor: Descriptor, +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub enum DaemonCommunication { + Shmem { + daemon_control_region_id: SharedMemoryId, + daemon_drop_region_id: SharedMemoryId, + daemon_events_region_id: SharedMemoryId, + daemon_events_close_region_id: SharedMemoryId, + }, + Tcp { + socket_addr: SocketAddr, + }, +} + +#[derive(Debug, serde::Serialize, serde::Deserialize)] +pub struct RuntimeConfig { + pub node: NodeConfig, + pub operators: Vec, +} + +#[derive(Debug, serde::Serialize, serde::Deserialize)] +pub enum DaemonRequest { + Register { + dataflow_id: DataflowId, + node_id: NodeId, + dora_version: String, + }, + Subscribe, + SendMessage { + output_id: DataId, + metadata: Metadata, + data: Option, + }, + CloseOutputs(Vec), + /// Signals that the node is finished sending outputs and that it received all + /// required drop tokens. + OutputsDone, + NextEvent { + drop_tokens: Vec, + }, + ReportDropTokens { + drop_tokens: Vec, + }, + SubscribeDrop, + NextFinishedDropTokens, + EventStreamDropped, +} + +impl DaemonRequest { + pub fn expects_tcp_reply(&self) -> bool { + #[allow(clippy::match_like_matches_macro)] + match self { + DaemonRequest::SendMessage { .. } | DaemonRequest::ReportDropTokens { .. } => false, + DaemonRequest::Register { .. } + | DaemonRequest::Subscribe + | DaemonRequest::CloseOutputs(_) + | DaemonRequest::OutputsDone + | DaemonRequest::NextEvent { .. } + | DaemonRequest::SubscribeDrop + | DaemonRequest::NextFinishedDropTokens + | DaemonRequest::EventStreamDropped => true, + } + } +} + +#[derive(serde::Serialize, serde::Deserialize, Clone)] +pub enum DataMessage { + Vec(AVec>), + SharedMemory { + shared_memory_id: String, + len: usize, + drop_token: DropToken, + }, +} + +impl DataMessage { + pub fn drop_token(&self) -> Option { + match self { + DataMessage::Vec(_) => None, + DataMessage::SharedMemory { drop_token, .. 
} => Some(*drop_token), + } + } +} + +impl fmt::Debug for DataMessage { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Vec(v) => f + .debug_struct("Vec") + .field("len", &v.len()) + .finish_non_exhaustive(), + Self::SharedMemory { + shared_memory_id, + len, + drop_token, + } => f + .debug_struct("SharedMemory") + .field("shared_memory_id", shared_memory_id) + .field("len", len) + .field("drop_token", drop_token) + .finish(), + } + } +} + +type SharedMemoryId = String; + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[must_use] +pub enum DaemonReply { + Result(Result<(), String>), + PreparedMessage { shared_memory_id: SharedMemoryId }, + NextEvents(Vec>), + NextDropEvents(Vec>), + Empty, +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct Timestamped { + pub inner: T, + pub timestamp: uhlc::Timestamp, +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub enum NodeEvent { + Stop, + Reload { + operator_id: Option, + }, + Input { + id: DataId, + metadata: Metadata, + data: Option, + }, + InputClosed { + id: DataId, + }, + AllInputsClosed, +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub enum NodeDropEvent { + OutputDropped { drop_token: DropToken }, +} + +#[derive(Debug, serde::Serialize, serde::Deserialize)] +pub struct DropEvent { + pub tokens: Vec, +} + +#[derive( + Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize, +)] +pub struct DropToken(Uuid); + +impl DropToken { + pub fn generate() -> Self { + Self(Uuid::new_v7(Timestamp::now(NoContext))) + } +} + +#[derive(Debug, serde::Serialize, serde::Deserialize)] +pub enum InputData { + SharedMemory(SharedMemoryInput), + Vec(Vec), +} + +impl InputData { + pub fn drop_token(&self) -> Option { + match self { + InputData::SharedMemory(data) => Some(data.drop_token), + InputData::Vec(_) => None, + } + } +} + +#[derive(Debug, serde::Serialize, serde::Deserialize)] +pub struct SharedMemoryInput { + pub shared_memory_id: SharedMemoryId, + pub len: usize, + pub drop_token: DropToken, +} + +#[derive(Debug, serde::Deserialize, serde::Serialize)] +pub enum DaemonCoordinatorEvent { + Spawn(SpawnDataflowNodes), + AllNodesReady { + dataflow_id: DataflowId, + success: bool, + }, + StopDataflow { + dataflow_id: DataflowId, + grace_duration: Option, + }, + ReloadDataflow { + dataflow_id: DataflowId, + node_id: NodeId, + operator_id: Option, + }, + Logs { + dataflow_id: DataflowId, + node_id: NodeId, + }, + Destroy, + Heartbeat, +} + +#[derive(Debug, serde::Deserialize, serde::Serialize)] +pub enum InterDaemonEvent { + Output { + dataflow_id: DataflowId, + node_id: NodeId, + output_id: DataId, + metadata: Metadata, + data: Option>>, + }, + InputsClosed { + dataflow_id: DataflowId, + inputs: BTreeSet<(NodeId, DataId)>, + }, +} + +#[derive(Debug, serde::Deserialize, serde::Serialize)] +pub enum DaemonCoordinatorReply { + SpawnResult(Result<(), String>), + ReloadResult(Result<(), String>), + StopResult(Result<(), String>), + DestroyResult { + result: Result<(), String>, + #[serde(skip)] + notify: Option>, + }, + Logs(Result, String>), +} + +pub type DataflowId = Uuid; + +#[derive(Debug, serde::Deserialize, serde::Serialize)] +pub struct SpawnDataflowNodes { + pub dataflow_id: DataflowId, + pub working_dir: PathBuf, + pub nodes: Vec, + pub machine_listen_ports: BTreeMap, + pub dataflow_descriptor: Descriptor, +} diff --git a/libraries/core/src/descriptor/mod.rs b/libraries/core/src/descriptor/mod.rs 
new file mode 100644 index 0000000000000000000000000000000000000000..fc836412ad5ff1aebcbddd6d4415cbeb41133d17 --- /dev/null +++ b/libraries/core/src/descriptor/mod.rs @@ -0,0 +1,497 @@ +use crate::config::{ + CommunicationConfig, DataId, Input, InputMapping, NodeId, NodeRunConfig, OperatorId, +}; +use eyre::{bail, eyre, Context, OptionExt, Result}; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use serde_with_expand_env::with_expand_envs; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + env::consts::EXE_EXTENSION, + fmt, + path::{Path, PathBuf}, +}; +use tracing::warn; +pub use visualize::collect_dora_timers; +mod validate; +mod visualize; +pub const SHELL_SOURCE: &str = "shell"; + +/// Dataflow description +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(deny_unknown_fields)] +#[schemars(title = "dora-rs specification")] +pub struct Descriptor { + #[schemars(skip)] + #[serde(default)] + pub communication: CommunicationConfig, + #[schemars(skip)] + #[serde(default, rename = "_unstable_deploy")] + pub deploy: Deploy, + pub nodes: Vec, +} + +pub const SINGLE_OPERATOR_DEFAULT_ID: &str = "op"; + +impl Descriptor { + pub fn resolve_aliases_and_set_defaults(&self) -> eyre::Result> { + let default_op_id = OperatorId::from(SINGLE_OPERATOR_DEFAULT_ID.to_string()); + + let single_operator_nodes: HashMap<_, _> = self + .nodes + .iter() + .filter_map(|n| { + n.operator + .as_ref() + .map(|op| (&n.id, op.id.as_ref().unwrap_or(&default_op_id))) + }) + .collect(); + + let mut resolved = vec![]; + for mut node in self.nodes.clone() { + // adjust input mappings + let mut node_kind = node.kind_mut()?; + let input_mappings: Vec<_> = match &mut node_kind { + NodeKindMut::Standard { path: _, inputs } => inputs.values_mut().collect(), + NodeKindMut::Runtime(node) => node + .operators + .iter_mut() + .flat_map(|op| op.config.inputs.values_mut()) + .collect(), + NodeKindMut::Custom(node) => node.run_config.inputs.values_mut().collect(), + NodeKindMut::Operator(operator) => operator.config.inputs.values_mut().collect(), + }; + for mapping in input_mappings + .into_iter() + .filter_map(|i| match &mut i.mapping { + InputMapping::Timer { .. 
} => None, + InputMapping::User(m) => Some(m), + }) + { + if let Some(op_name) = single_operator_nodes.get(&mapping.source).copied() { + mapping.output = DataId::from(format!("{op_name}/{}", mapping.output)); + } + } + + // resolve nodes + let kind = match node_kind { + NodeKindMut::Standard { path, inputs: _ } => CoreNodeKind::Custom(CustomNode { + source: path.clone(), + args: node.args, + build: node.build, + send_stdout_as: node.send_stdout_as, + run_config: NodeRunConfig { + inputs: node.inputs, + outputs: node.outputs, + }, + envs: None, + }), + NodeKindMut::Custom(node) => CoreNodeKind::Custom(node.clone()), + NodeKindMut::Runtime(node) => CoreNodeKind::Runtime(node.clone()), + NodeKindMut::Operator(op) => CoreNodeKind::Runtime(RuntimeNode { + operators: vec![OperatorDefinition { + id: op.id.clone().unwrap_or_else(|| default_op_id.clone()), + config: op.config.clone(), + }], + }), + }; + + resolved.push(ResolvedNode { + id: node.id, + name: node.name, + description: node.description, + env: node.env, + deploy: ResolvedDeploy::new(node.deploy, self), + kind, + }); + } + + Ok(resolved) + } + + pub fn visualize_as_mermaid(&self) -> eyre::Result { + let resolved = self.resolve_aliases_and_set_defaults()?; + let flowchart = visualize::visualize_nodes(&resolved); + + Ok(flowchart) + } + + pub async fn read(path: &Path) -> eyre::Result { + let buf = tokio::fs::read(path) + .await + .context("failed to open given file")?; + Descriptor::parse(buf) + } + + pub fn blocking_read(path: &Path) -> eyre::Result { + let buf = std::fs::read(path).context("failed to open given file")?; + Descriptor::parse(buf) + } + + pub fn parse(buf: Vec) -> eyre::Result { + serde_yaml::from_slice(&buf).context("failed to parse given descriptor") + } + + pub fn check(&self, working_dir: &Path) -> eyre::Result<()> { + validate::check_dataflow(self, working_dir).wrap_err("Dataflow could not be validated.") + } +} + +#[derive(Debug, Clone, Default, Serialize, Deserialize, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct Deploy { + pub machine: Option, +} + +/// Dora Node +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct Node { + /// Node identifier + pub id: NodeId, + /// Node name + pub name: Option, + /// Description of the node + pub description: Option, + /// Environment variables + pub env: Option>, + + /// Unstable machine deployment configuration + #[schemars(skip)] + #[serde(default, rename = "_unstable_deploy")] + pub deploy: Deploy, + + #[serde(default, skip_serializing_if = "Option::is_none")] + operators: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + custom: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + operator: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub path: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub args: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub build: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub send_stdout_as: Option, + #[serde(default)] + pub inputs: BTreeMap, + #[serde(default)] + pub outputs: BTreeSet, +} + +impl Node { + pub fn kind(&self) -> eyre::Result { + match (&self.path, &self.operators, &self.custom, &self.operator) { + (None, None, None, None) => { + eyre::bail!( + "node `{}` requires a `path`, `custom`, or `operators` field", + self.id + ) + } + (None, None, None, Some(operator)) => Ok(NodeKind::Operator(operator)), + (None, None, Some(custom), None) => 
Ok(NodeKind::Custom(custom)), + (None, Some(runtime), None, None) => Ok(NodeKind::Runtime(runtime)), + (Some(path), None, None, None) => Ok(NodeKind::Standard(path)), + _ => { + eyre::bail!( + "node `{}` has multiple exclusive fields set, only one of `path`, `custom`, `operators` and `operator` is allowed", + self.id + ) + } + } + } + + fn kind_mut(&mut self) -> eyre::Result { + match self.kind()? { + NodeKind::Standard(_) => self + .path + .as_ref() + .map(|path| NodeKindMut::Standard { + path, + inputs: &mut self.inputs, + }) + .ok_or_eyre("no path"), + NodeKind::Runtime(_) => self + .operators + .as_mut() + .map(NodeKindMut::Runtime) + .ok_or_eyre("no operators"), + NodeKind::Custom(_) => self + .custom + .as_mut() + .map(NodeKindMut::Custom) + .ok_or_eyre("no custom"), + NodeKind::Operator(_) => self + .operator + .as_mut() + .map(NodeKindMut::Operator) + .ok_or_eyre("no operator"), + } + } +} + +#[derive(Debug)] +pub enum NodeKind<'a> { + Standard(&'a String), + /// Dora runtime node + Runtime(&'a RuntimeNode), + Custom(&'a CustomNode), + Operator(&'a SingleOperatorDefinition), +} + +#[derive(Debug)] +enum NodeKindMut<'a> { + Standard { + path: &'a String, + inputs: &'a mut BTreeMap, + }, + /// Dora runtime node + Runtime(&'a mut RuntimeNode), + Custom(&'a mut CustomNode), + Operator(&'a mut SingleOperatorDefinition), +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ResolvedNode { + pub id: NodeId, + pub name: Option, + pub description: Option, + pub env: Option>, + + #[serde(default)] + pub deploy: ResolvedDeploy, + + #[serde(flatten)] + pub kind: CoreNodeKind, +} + +impl ResolvedNode { + pub fn send_stdout_as(&self) -> Result> { + match &self.kind { + // TODO: Split stdout between operators + CoreNodeKind::Runtime(n) => { + let count = n + .operators + .iter() + .filter(|op| op.config.send_stdout_as.is_some()) + .count(); + if count == 1 && n.operators.len() > 1 { + warn!("All stdout from all operators of a runtime are going to be sent in the selected `send_stdout_as` operator.") + } else if count > 1 { + return Err(eyre!("More than one `send_stdout_as` entries for a runtime node. Please only use one `send_stdout_as` per runtime.")); + } + Ok(n.operators.iter().find_map(|op| { + op.config + .send_stdout_as + .clone() + .map(|stdout| format!("{}/{}", op.id, stdout)) + })) + } + CoreNodeKind::Custom(n) => Ok(n.send_stdout_as.clone()), + } + } +} + +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct ResolvedDeploy { + pub machine: String, +} +impl ResolvedDeploy { + fn new(deploy: Deploy, descriptor: &Descriptor) -> Self { + let default_machine = descriptor.deploy.machine.as_deref().unwrap_or_default(); + let machine = match deploy.machine { + Some(m) => m, + None => default_machine.to_owned(), + }; + Self { machine } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum CoreNodeKind { + /// Dora runtime node + #[serde(rename = "operators")] + Runtime(RuntimeNode), + Custom(CustomNode), +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(transparent)] +pub struct RuntimeNode { + pub operators: Vec, +} + +#[derive(Debug, Serialize, Deserialize, JsonSchema, Clone)] +pub struct OperatorDefinition { + pub id: OperatorId, + #[serde(flatten)] + pub config: OperatorConfig, +} + +#[derive(Debug, Serialize, Deserialize, JsonSchema, Clone)] +pub struct SingleOperatorDefinition { + /// ID is optional if there is only a single operator. 
+ pub id: Option, + #[serde(flatten)] + pub config: OperatorConfig, +} + +#[derive(Debug, Serialize, Deserialize, JsonSchema, Clone)] +pub struct OperatorConfig { + pub name: Option, + pub description: Option, + + #[serde(default)] + pub inputs: BTreeMap, + #[serde(default)] + pub outputs: BTreeSet, + + #[serde(flatten)] + pub source: OperatorSource, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub build: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub send_stdout_as: Option, +} + +#[derive(Debug, Serialize, Deserialize, JsonSchema, Clone)] +#[serde(rename_all = "kebab-case")] +pub enum OperatorSource { + SharedLibrary(String), + Python(PythonSource), + #[schemars(skip)] + Wasm(String), +} +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[serde( + deny_unknown_fields, + from = "PythonSourceDef", + into = "PythonSourceDef" +)] +pub struct PythonSource { + pub source: String, + pub conda_env: Option, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[serde(untagged)] +pub enum PythonSourceDef { + SourceOnly(String), + WithOptions { + source: String, + conda_env: Option, + }, +} + +impl From for PythonSourceDef { + fn from(input: PythonSource) -> Self { + match input { + PythonSource { + source, + conda_env: None, + } => Self::SourceOnly(source), + PythonSource { source, conda_env } => Self::WithOptions { source, conda_env }, + } + } +} + +impl From for PythonSource { + fn from(value: PythonSourceDef) -> Self { + match value { + PythonSourceDef::SourceOnly(source) => Self { + source, + conda_env: None, + }, + PythonSourceDef::WithOptions { source, conda_env } => Self { source, conda_env }, + } + } +} + +pub fn source_is_url(source: &str) -> bool { + source.contains("://") +} + +pub fn resolve_path(source: &str, working_dir: &Path) -> Result { + let path = Path::new(&source); + let path = if path.extension().is_none() { + path.with_extension(EXE_EXTENSION) + } else { + path.to_owned() + }; + + // Search path within current working directory + if let Ok(abs_path) = working_dir.join(&path).canonicalize() { + Ok(abs_path) + // Search path within $PATH + } else if let Ok(abs_path) = which::which(&path) { + Ok(abs_path) + } else { + bail!("Could not find source path {}", path.display()) + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +#[serde(deny_unknown_fields)] +pub struct PythonOperatorConfig { + pub path: PathBuf, + #[serde(default)] + pub inputs: BTreeMap, + #[serde(default)] + pub outputs: BTreeSet, +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +pub struct CustomNode { + /// Path of the source code + /// + /// If you want to use a specific `conda` environment. + /// Provide the python path within the source. + /// + /// source: /home/peter/miniconda3/bin/python + /// + /// args: some_node.py + /// + /// Source can match any executable in PATH. + pub source: String, + /// Args for the executable. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub args: Option, + /// Environment variables for the custom nodes + /// + /// Deprecated, use outer-level `env` field instead. 
+ pub envs: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub build: Option, + /// Send stdout and stderr to another node + #[serde(skip_serializing_if = "Option::is_none")] + pub send_stdout_as: Option, + + #[serde(flatten)] + pub run_config: NodeRunConfig, +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(untagged)] +pub enum EnvValue { + #[serde(deserialize_with = "with_expand_envs")] + Bool(bool), + #[serde(deserialize_with = "with_expand_envs")] + Integer(u64), + #[serde(deserialize_with = "with_expand_envs")] + String(String), +} + +impl fmt::Display for EnvValue { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match self { + EnvValue::Bool(bool) => fmt.write_str(&bool.to_string()), + EnvValue::Integer(u64) => fmt.write_str(&u64.to_string()), + EnvValue::String(str) => fmt.write_str(str), + } + } +} diff --git a/libraries/core/src/descriptor/validate.rs b/libraries/core/src/descriptor/validate.rs new file mode 100644 index 0000000000000000000000000000000000000000..fe558096775543f7732df7848de6d6ca95483051 --- /dev/null +++ b/libraries/core/src/descriptor/validate.rs @@ -0,0 +1,178 @@ +use crate::{ + adjust_shared_library_path, + config::{DataId, Input, InputMapping, OperatorId, UserInputMapping}, + descriptor::{self, source_is_url, CoreNodeKind, OperatorSource}, + get_python_path, +}; + +use eyre::{bail, eyre, Context}; +use std::{path::Path, process::Command}; +use tracing::info; + +use super::{resolve_path, Descriptor, SHELL_SOURCE}; +const VERSION: &str = env!("CARGO_PKG_VERSION"); + +pub fn check_dataflow(dataflow: &Descriptor, working_dir: &Path) -> eyre::Result<()> { + let nodes = dataflow.resolve_aliases_and_set_defaults()?; + let mut has_python_operator = false; + + // check that nodes and operators exist + for node in &nodes { + match &node.kind { + descriptor::CoreNodeKind::Custom(node) => match node.source.as_str() { + SHELL_SOURCE => (), + source => { + if source_is_url(source) { + info!("{source} is a URL."); // TODO: Implement url check. + } else { + resolve_path(source, working_dir) + .wrap_err_with(|| format!("Could not find source path `{}`", source))?; + }; + } + }, + descriptor::CoreNodeKind::Runtime(node) => { + for operator_definition in &node.operators { + match &operator_definition.config.source { + OperatorSource::SharedLibrary(path) => { + if source_is_url(path) { + info!("{path} is a URL."); // TODO: Implement url check. + } else { + let path = adjust_shared_library_path(Path::new(&path))?; + if !working_dir.join(&path).exists() { + bail!("no shared library at `{}`", path.display()); + } + } + } + OperatorSource::Python(python_source) => { + has_python_operator = true; + let path = &python_source.source; + if source_is_url(path) { + info!("{path} is a URL."); // TODO: Implement url check. + } else if !working_dir.join(path).exists() { + bail!("no Python library at `{path}`"); + } + } + OperatorSource::Wasm(path) => { + if source_is_url(path) { + info!("{path} is a URL."); // TODO: Implement url check. 
+ } else if !working_dir.join(path).exists() { + bail!("no WASM library at `{path}`"); + } + } + } + } + } + } + } + + // check that all inputs mappings point to an existing output + for node in &nodes { + match &node.kind { + descriptor::CoreNodeKind::Custom(custom_node) => { + for (input_id, input) in &custom_node.run_config.inputs { + check_input(input, &nodes, &format!("{}/{input_id}", node.id))?; + } + } + descriptor::CoreNodeKind::Runtime(runtime_node) => { + for operator_definition in &runtime_node.operators { + for (input_id, input) in &operator_definition.config.inputs { + check_input( + input, + &nodes, + &format!("{}/{}/{input_id}", operator_definition.id, node.id), + )?; + } + } + } + }; + } + + // Check that nodes can resolve `send_stdout_as` + for node in &nodes { + node.send_stdout_as() + .context("Could not resolve `send_stdout_as` configuration")?; + } + + if has_python_operator { + check_python_runtime()?; + } + + Ok(()) +} + +fn check_input( + input: &Input, + nodes: &[super::ResolvedNode], + input_id_str: &str, +) -> Result<(), eyre::ErrReport> { + match &input.mapping { + InputMapping::Timer { interval: _ } => {} + InputMapping::User(UserInputMapping { source, output }) => { + let source_node = nodes.iter().find(|n| &n.id == source).ok_or_else(|| { + eyre!("source node `{source}` mapped to input `{input_id_str}` does not exist",) + })?; + match &source_node.kind { + CoreNodeKind::Custom(custom_node) => { + if !custom_node.run_config.outputs.contains(output) { + bail!( + "output `{source}/{output}` mapped to \ + input `{input_id_str}` does not exist", + ); + } + } + CoreNodeKind::Runtime(runtime) => { + let (operator_id, output) = output.split_once('/').unwrap_or_default(); + let operator_id = OperatorId::from(operator_id.to_owned()); + let output = DataId::from(output.to_owned()); + + let operator = runtime + .operators + .iter() + .find(|o| o.id == operator_id) + .ok_or_else(|| { + eyre!( + "source operator `{source}/{operator_id}` used \ + for input `{input_id_str}` does not exist", + ) + })?; + + if !operator.config.outputs.contains(&output) { + bail!( + "output `{source}/{operator_id}/{output}` mapped to \ + input `{input_id_str}` does not exist", + ); + } + } + } + } + }; + Ok(()) +} + +fn check_python_runtime() -> eyre::Result<()> { + // Check if python dora-rs is installed and match cli version + let reinstall_command = + format!("Please reinstall it with: `pip install dora-rs=={VERSION} --force`"); + let mut command = Command::new(get_python_path().context("Could not get python binary")?); + command.args([ + "-c", + &format!( + " +import dora; +assert dora.__version__=='{VERSION}', 'Python dora-rs should be {VERSION}, but current version is %s. {reinstall_command}' % (dora.__version__) + " + ), + ]); + let mut result = command + .spawn() + .wrap_err("Could not spawn python dora-rs command.")?; + let status = result + .wait() + .wrap_err("Could not get exit status when checking python dora-rs")?; + + if !status.success() { + bail!("Something went wrong with Python dora-rs. 
{reinstall_command}") + } + + Ok(()) +} diff --git a/libraries/core/src/descriptor/visualize.rs b/libraries/core/src/descriptor/visualize.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc0c753011850f547ceca1a5236532b4cbe62b98 --- /dev/null +++ b/libraries/core/src/descriptor/visualize.rs @@ -0,0 +1,214 @@ +use super::{CoreNodeKind, CustomNode, OperatorDefinition, ResolvedNode, RuntimeNode}; +use crate::config::{format_duration, DataId, Input, InputMapping, NodeId, UserInputMapping}; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + fmt::Write as _, + time::Duration, +}; + +pub fn visualize_nodes(nodes: &[ResolvedNode]) -> String { + let mut flowchart = "flowchart TB\n".to_owned(); + let mut all_nodes = HashMap::new(); + + for node in nodes { + visualize_node(node, &mut flowchart); + all_nodes.insert(&node.id, node); + } + + let dora_timers = collect_dora_timers(nodes); + if !dora_timers.is_empty() { + writeln!(flowchart, "subgraph ___dora___ [dora]").unwrap(); + writeln!(flowchart, " subgraph ___timer_timer___ [timer]").unwrap(); + for interval in dora_timers { + let duration = format_duration(interval); + writeln!(flowchart, " dora/timer/{duration}[\\{duration}/]").unwrap(); + } + flowchart.push_str(" end\n"); + flowchart.push_str("end\n"); + } + + for node in nodes { + visualize_node_inputs(node, &mut flowchart, &all_nodes) + } + + flowchart +} + +pub fn collect_dora_timers(nodes: &[ResolvedNode]) -> BTreeSet { + let mut dora_timers = BTreeSet::new(); + for node in nodes { + match &node.kind { + CoreNodeKind::Runtime(node) => { + for operator in &node.operators { + collect_dora_nodes(operator.config.inputs.values(), &mut dora_timers); + } + } + CoreNodeKind::Custom(node) => { + collect_dora_nodes(node.run_config.inputs.values(), &mut dora_timers); + } + } + } + dora_timers +} + +fn collect_dora_nodes( + values: std::collections::btree_map::Values, + dora_timers: &mut BTreeSet, +) { + for input in values { + match &input.mapping { + InputMapping::User(_) => {} + InputMapping::Timer { interval } => { + dora_timers.insert(*interval); + } + } + } +} + +fn visualize_node(node: &ResolvedNode, flowchart: &mut String) { + let node_id = &node.id; + match &node.kind { + CoreNodeKind::Custom(node) => visualize_custom_node(node_id, node, flowchart), + CoreNodeKind::Runtime(RuntimeNode { operators, .. 
}) => { + visualize_runtime_node(node_id, operators, flowchart) + } + } +} + +fn visualize_custom_node(node_id: &NodeId, node: &CustomNode, flowchart: &mut String) { + if node.run_config.inputs.is_empty() { + // source node + writeln!(flowchart, " {node_id}[\\{node_id}/]").unwrap(); + } else if node.run_config.outputs.is_empty() { + // sink node + writeln!(flowchart, " {node_id}[/{node_id}\\]").unwrap(); + } else { + // normal node + writeln!(flowchart, " {node_id}").unwrap(); + } +} + +fn visualize_runtime_node( + node_id: &NodeId, + operators: &[OperatorDefinition], + flowchart: &mut String, +) { + if operators.len() == 1 && operators[0].id.to_string() == "op" { + let operator = &operators[0]; + // single operator node + if operator.config.inputs.is_empty() { + // source node + writeln!(flowchart, " {node_id}/op[\\{node_id}/]").unwrap(); + } else if operator.config.outputs.is_empty() { + // sink node + writeln!(flowchart, " {node_id}/op[/{node_id}\\]").unwrap(); + } else { + // normal node + writeln!(flowchart, " {node_id}/op[{node_id}]").unwrap(); + } + } else { + writeln!(flowchart, "subgraph {node_id}").unwrap(); + for operator in operators { + let operator_id = &operator.id; + if operator.config.inputs.is_empty() { + // source operator + writeln!(flowchart, " {node_id}/{operator_id}[\\{operator_id}/]").unwrap(); + } else if operator.config.outputs.is_empty() { + // sink operator + writeln!(flowchart, " {node_id}/{operator_id}[/{operator_id}\\]").unwrap(); + } else { + // normal operator + writeln!(flowchart, " {node_id}/{operator_id}[{operator_id}]").unwrap(); + } + } + flowchart.push_str("end\n"); + } +} + +fn visualize_node_inputs( + node: &ResolvedNode, + flowchart: &mut String, + nodes: &HashMap<&NodeId, &ResolvedNode>, +) { + let node_id = &node.id; + match &node.kind { + CoreNodeKind::Custom(node) => visualize_inputs( + &node_id.to_string(), + &node.run_config.inputs, + flowchart, + nodes, + ), + CoreNodeKind::Runtime(RuntimeNode { operators, .. }) => { + for operator in operators { + visualize_inputs( + &format!("{node_id}/{}", operator.id), + &operator.config.inputs, + flowchart, + nodes, + ) + } + } + } +} + +fn visualize_inputs( + target: &str, + inputs: &BTreeMap, + flowchart: &mut String, + nodes: &HashMap<&NodeId, &ResolvedNode>, +) { + for (input_id, input) in inputs { + match &input.mapping { + mapping @ InputMapping::Timer { .. } => { + writeln!(flowchart, " {} -- {input_id} --> {target}", mapping).unwrap(); + } + InputMapping::User(mapping) => { + visualize_user_mapping(mapping, target, nodes, input_id, flowchart) + } + } + } +} + +fn visualize_user_mapping( + mapping: &UserInputMapping, + target: &str, + nodes: &HashMap<&NodeId, &ResolvedNode>, + input_id: &DataId, + flowchart: &mut String, +) { + let UserInputMapping { source, output } = mapping; + let mut source_found = false; + if let Some(source_node) = nodes.get(source) { + match &source_node.kind { + CoreNodeKind::Custom(custom_node) => { + if custom_node.run_config.outputs.contains(output) { + let data = if output == input_id { + format!("{output}") + } else { + format!("{output} as {input_id}") + }; + writeln!(flowchart, " {source} -- {data} --> {target}").unwrap(); + source_found = true; + } + } + CoreNodeKind::Runtime(RuntimeNode { operators, .. 
}) => { + let (operator_id, output) = output.split_once('/').unwrap_or(("", output)); + if let Some(operator) = operators.iter().find(|o| o.id.as_ref() == operator_id) { + if operator.config.outputs.contains(output) { + let data = if output == input_id.as_str() { + output.to_string() + } else { + format!("{output} as {input_id}") + }; + writeln!(flowchart, " {source}/{operator_id} -- {data} --> {target}") + .unwrap(); + source_found = true; + } + } + } + } + } + if !source_found { + writeln!(flowchart, " missing>missing] -- {input_id} --> {target}").unwrap(); + } +} diff --git a/libraries/core/src/lib.rs b/libraries/core/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..83ce0dc5529846ed455dc178704580d193471927 --- /dev/null +++ b/libraries/core/src/lib.rs @@ -0,0 +1,75 @@ +use eyre::{bail, eyre, Context}; +use std::{ + env::consts::{DLL_PREFIX, DLL_SUFFIX}, + ffi::OsStr, + path::Path, +}; + +pub use dora_message as message; + +pub mod config; +pub mod coordinator_messages; +pub mod daemon_messages; +pub mod descriptor; +pub mod topics; + +pub fn adjust_shared_library_path(path: &Path) -> Result { + let file_name = path + .file_name() + .ok_or_else(|| eyre!("shared library path has no file name"))? + .to_str() + .ok_or_else(|| eyre!("shared library file name is not valid UTF8"))?; + if file_name.starts_with("lib") { + bail!("Shared library file name must not start with `lib`, prefix is added automatically"); + } + if path.extension().is_some() { + bail!("Shared library file name must have no extension, it is added automatically"); + } + + let library_filename = format!("{DLL_PREFIX}{file_name}{DLL_SUFFIX}"); + + let path = path.with_file_name(library_filename); + Ok(path) +} + +// Search for python binary. +// Match `python` for windows and `python3` for other platforms. +pub fn get_python_path() -> Result { + let python = if cfg!(windows) { + which::which("python") + .context("failed to find `python` or `python3`. Make sure that python is available.")? + } else { + which::which("python3") + .context("failed to find `python` or `python3`. Make sure that python is available.")? + }; + Ok(python) +} + +// Search for pip binary. +// First search for `pip3` as for ubuntu <20, `pip` can resolves to `python2,7 -m pip` +// Then search for `pip`, this will resolve for windows to python3 -m pip. +pub fn get_pip_path() -> Result { + let python = match which::which("pip3") { + Ok(python) => python, + Err(_) => which::which("pip") + .context("failed to find `pip3` or `pip`. 
Make sure that python is available.")?, + }; + Ok(python) +} + +// Helper function to run a program +pub async fn run(program: S, args: &[&str], pwd: Option<&Path>) -> eyre::Result<()> +where + S: AsRef, +{ + let mut run = tokio::process::Command::new(program); + run.args(args); + + if let Some(pwd) = pwd { + run.current_dir(pwd); + } + if !run.status().await?.success() { + eyre::bail!("failed to run {args:?}"); + }; + Ok(()) +} diff --git a/libraries/core/src/topics.rs b/libraries/core/src/topics.rs new file mode 100644 index 0000000000000000000000000000000000000000..506c1b420e93c682fc49cf19a7a4a721b4d9e221 --- /dev/null +++ b/libraries/core/src/topics.rs @@ -0,0 +1,88 @@ +use std::{collections::BTreeSet, fmt::Display, path::PathBuf, time::Duration}; +use uuid::Uuid; + +use crate::{ + config::{NodeId, OperatorId}, + descriptor::Descriptor, +}; + +pub const DORA_COORDINATOR_PORT_DEFAULT: u16 = 0xD02A; +pub const DORA_COORDINATOR_PORT_CONTROL_DEFAULT: u16 = 0x177C; + +pub const MANUAL_STOP: &str = "dora/stop"; + +#[derive(Debug, serde::Deserialize, serde::Serialize)] +pub enum ControlRequest { + Start { + dataflow: Descriptor, + name: Option, + // TODO: remove this once we figure out deploying of node/operator + // binaries from CLI to coordinator/daemon + local_working_dir: PathBuf, + }, + Reload { + dataflow_id: Uuid, + node_id: NodeId, + operator_id: Option, + }, + Check { + dataflow_uuid: Uuid, + }, + Stop { + dataflow_uuid: Uuid, + grace_duration: Option, + }, + StopByName { + name: String, + grace_duration: Option, + }, + Logs { + uuid: Option, + name: Option, + node: String, + }, + Destroy, + List, + DaemonConnected, + ConnectedMachines, +} + +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +pub enum ControlRequestReply { + Error(String), + CoordinatorStopped, + DataflowStarted { + uuid: Uuid, + }, + DataflowReloaded { + uuid: Uuid, + }, + DataflowStopped { + uuid: Uuid, + result: Result<(), String>, + }, + + DataflowList { + dataflows: Vec, + }, + DestroyOk, + DaemonConnected(bool), + ConnectedMachines(BTreeSet), + Logs(Vec), +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct DataflowId { + pub uuid: Uuid, + pub name: Option, +} + +impl Display for DataflowId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if let Some(name) = &self.name { + write!(f, "[{name}] {}", self.uuid) + } else { + write!(f, "[] {}", self.uuid) + } + } +} diff --git a/libraries/extensions/download/Cargo.toml b/libraries/extensions/download/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..e4e0e5e883d6132b95846dea5b7c8171d710958f --- /dev/null +++ b/libraries/extensions/download/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "dora-download" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +eyre = "0.6.8" +reqwest = { version = "0.12.4", default-features = false, features = [ + "rustls-tls", +] } +tokio = { version = "1.24.2", features = ["fs"] } +tracing = "0.1.36" diff --git a/libraries/extensions/download/src/lib.rs b/libraries/extensions/download/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..b0843c83ffdc00b1ab9fceb5ae0b4913acbd3029 --- /dev/null +++ b/libraries/extensions/download/src/lib.rs @@ -0,0 +1,43 @@ +use eyre::Context; +#[cfg(unix)] +use 
std::os::unix::prelude::PermissionsExt; +use std::path::Path; +use tokio::io::AsyncWriteExt; +use tracing::info; + +pub async fn download_file(url: T, target_path: &Path) -> Result<(), eyre::ErrReport> +where + T: reqwest::IntoUrl + std::fmt::Display + Copy, +{ + if target_path.exists() { + info!("Using cache: {:?}", target_path.to_str()); + return Ok(()); + } + + if let Some(parent) = target_path.parent() { + tokio::fs::create_dir_all(parent) + .await + .wrap_err("failed to create parent folder")?; + } + + let response = reqwest::get(url) + .await + .wrap_err_with(|| format!("failed to request operator from `{url}`"))? + .bytes() + .await + .wrap_err("failed to read operator from `{uri}`")?; + let mut file = tokio::fs::File::create(target_path) + .await + .wrap_err("failed to create target file")?; + file.write_all(&response) + .await + .wrap_err("failed to write downloaded operator to file")?; + file.sync_all().await.wrap_err("failed to `sync_all`")?; + + #[cfg(unix)] + file.set_permissions(std::fs::Permissions::from_mode(0o764)) + .await + .wrap_err("failed to make downloaded file executable")?; + + Ok(()) +} diff --git a/libraries/extensions/ros2-bridge/Cargo.toml b/libraries/extensions/ros2-bridge/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..96a2ad8692ae15f0f5221d4178b63c0ca3301247 --- /dev/null +++ b/libraries/extensions/ros2-bridge/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "dora-ros2-bridge" +version = "0.1.0" +edition = "2021" +links = "dora-ros2-bridge" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[features] +default = ["generate-messages"] +generate-messages = ["dep:dora-ros2-bridge-msg-gen", "dep:rust-format"] +# enables examples that depend on a sourced ROS2 installation +ros2-examples = ["dep:eyre", "tokio", "dora-daemon"] + +[dependencies] +array-init = "2.1.0" +serde = { version = "1.0.164", features = ["derive"] } +serde-big-array = "0.5.1" +widestring = "1.0.2" +ros2-client = "0.7.1" +rustdds = "0.10.0" +eyre = { version = "0.6.8", optional = true } +tokio = { version = "1.29.1", features = ["full"], optional = true } +dora-daemon = { path = "../../../binaries/daemon", optional = true } +tracing = "0.1.37" +tracing-subscriber = "0.3.17" +flume = "0.11.0" +futures = { version = "0.3.21", features = ["thread-pool"] } +futures-timer = "3.0.3" + +[dev-dependencies] +rand = "0.8.5" +futures = { version = "0.3.28", default-features = false } + +[build-dependencies] +dora-ros2-bridge-msg-gen = { workspace = true, optional = true } +rust-format = { version = "0.3.4", features = [ + "pretty_please", +], optional = true } diff --git a/libraries/extensions/ros2-bridge/build.rs b/libraries/extensions/ros2-bridge/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..46b3f0b5a124c725a3fa0d8364bccff4e3c18b0f --- /dev/null +++ b/libraries/extensions/ros2-bridge/build.rs @@ -0,0 +1,44 @@ +#[cfg(feature = "generate-messages")] +use std::path::PathBuf; + +#[cfg(not(feature = "generate-messages"))] +fn main() {} + +#[cfg(feature = "generate-messages")] +fn main() { + use rust_format::Formatter; + let paths = ament_prefix_paths(); + let generated = dora_ros2_bridge_msg_gen::gen(paths.as_slice(), false); + let generated_string = rust_format::PrettyPlease::default() + .format_tokens(generated) + .unwrap(); + let out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap()); + let target_file = out_dir.join("messages.rs"); + std::fs::write(&target_file, 
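    // Descriptive note: the generated bindings are written to
    // `$OUT_DIR/messages.rs` and exposed through the `MESSAGES_PATH` env var
    // set below; a consuming crate would presumably pull them in with
    // something like `include!(env!("MESSAGES_PATH"));` (assumed usage, not
    // shown in this diff).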
generated_string).unwrap(); + println!("cargo:rustc-env=MESSAGES_PATH={}", target_file.display()); +} + +#[cfg(feature = "generate-messages")] +fn ament_prefix_paths() -> Vec { + let ament_prefix_path: String = match std::env::var("AMENT_PREFIX_PATH") { + Ok(path) => path, + Err(std::env::VarError::NotPresent) => { + println!("cargo:warning='AMENT_PREFIX_PATH not set'"); + String::new() + } + Err(std::env::VarError::NotUnicode(s)) => { + panic!( + "AMENT_PREFIX_PATH is not valid unicode: `{}`", + s.to_string_lossy() + ); + } + }; + println!("cargo:rerun-if-env-changed=AMENT_PREFIX_PATH"); + + let paths: Vec<_> = ament_prefix_path.split(':').map(PathBuf::from).collect(); + for path in &paths { + println!("cargo:rerun-if-changed={}", path.display()); + } + + paths +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/Cargo.toml b/libraries/extensions/ros2-bridge/msg-gen/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..2bd92348b3d2d5a34c3a39e3e967823c3773620d --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "dora-ros2-bridge-msg-gen" +version = "0.1.0" +edition = "2021" +authors = ["Yuma Hiramatsu "] +license = "Apache-2.0" + +[dependencies] +anyhow = "1.0" +heck = "0.3" +nom = "7" +proc-macro2 = "1.0" +quote = "1.0" +regex = "1" +syn = "1.0" +thiserror = "1.0" +glob = "0.3.1" +tracing = "0.1.33" diff --git a/libraries/extensions/ros2-bridge/msg-gen/build.rs b/libraries/extensions/ros2-bridge/msg-gen/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..9d5bff7515113ccf444d4bfe06e34936cd02a475 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/build.rs @@ -0,0 +1,25 @@ +use std::path::Path; + +fn main() { + let ament_prefix_path = match std::env::var("AMENT_PREFIX_PATH") { + Ok(path) => path, + Err(std::env::VarError::NotPresent) => { + println!("cargo:warning='AMENT_PREFIX_PATH not set'"); + String::new() + } + Err(std::env::VarError::NotUnicode(s)) => { + panic!( + "AMENT_PREFIX_PATH is not valid unicode: `{}`", + s.to_string_lossy() + ); + } + }; + println!("cargo:rerun-if-env-changed=AMENT_PREFIX_PATH"); + + let paths = ament_prefix_path.split(':').map(Path::new); + for path in paths { + println!("cargo:rerun-if-changed={}", path.display()); + } + + println!("cargo:rustc-env=DETECTED_AMENT_PREFIX_PATH={ament_prefix_path}"); +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/lib.rs b/libraries/extensions/ros2-bridge/msg-gen/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..c64f809bfa94383e4a6ec28315ea9e0f73449b30 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/lib.rs @@ -0,0 +1,282 @@ +// Based on https://github.com/rclrust/rclrust/tree/3a48dbb8f23a3d67d3031351da3ed236a354f039/rclrust-msg-gen + +#![warn( + rust_2018_idioms, + elided_lifetimes_in_paths, + clippy::all, + clippy::nursery +)] + +use std::path::Path; + +use quote::quote; + +pub mod parser; +pub mod types; + +pub use crate::parser::get_packages; + +pub fn gen
<P>
(paths: &[P], create_cxx_bridge: bool) -> proc_macro2::TokenStream +where + P: AsRef, +{ + let packages = get_packages(paths).unwrap(); + let mut shared_type_defs = Vec::new(); + let mut message_struct_impls = Vec::new(); + let mut message_topic_defs = Vec::new(); + let mut message_topic_impls = Vec::new(); + let mut service_defs = Vec::new(); + let mut service_impls = Vec::new(); + let mut service_creation_defs = Vec::new(); + let mut service_creation_impls = Vec::new(); + let mut aliases = Vec::new(); + for package in &packages { + for message in &package.messages { + let (def, imp) = message.struct_token_stream(&package.name, create_cxx_bridge); + shared_type_defs.push(def); + message_struct_impls.push(imp); + if create_cxx_bridge { + let (topic_def, topic_impl) = message.topic_def(&package.name); + message_topic_defs.push(topic_def); + message_topic_impls.push(topic_impl); + } + } + + for service in &package.services { + let (def, imp) = service.struct_token_stream(&package.name, create_cxx_bridge); + service_defs.push(def); + service_impls.push(imp); + if create_cxx_bridge { + let (service_creation_def, service_creation_impl) = + service.cxx_service_creation_functions(&package.name); + service_creation_defs.push(service_creation_def); + service_creation_impls.push(service_creation_impl); + } + } + + aliases.push(package.aliases_token_stream()); + } + + let (attributes, imports_and_functions, cxx_bridge_impls) = if create_cxx_bridge { + ( + quote! { #[cxx::bridge] }, + quote! { + #[allow(dead_code)] + extern "C++" { + type CombinedEvents = crate::ffi::CombinedEvents; + type CombinedEvent = crate::ffi::CombinedEvent; + } + + extern "Rust" { + type Ros2Context; + type Ros2Node; + fn init_ros2_context() -> Result>; + fn new_node(self: &Ros2Context, name_space: &str, base_name: &str) -> Result>; + fn qos_default() -> Ros2QosPolicies; + + #(#message_topic_defs)* + #(#service_creation_defs)* + } + + #[derive(Debug, Clone)] + pub struct Ros2QosPolicies { + pub durability: Ros2Durability, + pub liveliness: Ros2Liveliness, + pub lease_duration: f64, + pub reliable: bool, + pub max_blocking_time: f64, + pub keep_all: bool, + pub keep_last: i32, + } + + /// DDS 2.2.3.4 DURABILITY + #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub enum Ros2Durability { + Volatile, + TransientLocal, + Transient, + Persistent, + } + + /// DDS 2.2.3.11 LIVELINESS + #[derive(Copy, Clone, Debug, PartialEq)] + pub enum Ros2Liveliness { + Automatic, + ManualByParticipant, + ManualByTopic, + } + }, + quote! 
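            // Descriptive note: this third tuple element supplies the
            // Rust-side implementations backing the `extern "Rust"`
            // declarations above: the `Ros2Context`/`Ros2Node` wrappers around
            // `ros2_client` and the conversions from the bridged QoS types
            // into `rustdds` policies.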
{ + struct Ros2Context{ + context: ros2_client::Context, + executor: std::sync::Arc, + } + + fn init_ros2_context() -> eyre::Result> { + Ok(Box::new(Ros2Context{ + context: ros2_client::Context::new()?, + executor: std::sync::Arc::new(futures::executor::ThreadPool::new()?), + })) + } + + impl Ros2Context { + fn new_node(&self, name_space: &str, base_name: &str) -> eyre::Result> { + use futures::task::SpawnExt as _; + use eyre::WrapErr as _; + + let name = ros2_client::NodeName::new(name_space, base_name).map_err(|e| eyre::eyre!(e))?; + let options = ros2_client::NodeOptions::new().enable_rosout(true); + let mut node = self.context.new_node(name, options) + .map_err(|e| eyre::eyre!("failed to create ROS2 node: {e:?}"))?; + + let spinner = node.spinner().context("failed to create spinner")?; + self.executor.spawn(async { + if let Err(err) = spinner.spin().await { + eprintln!("ros2 spinner failed: {err:?}"); + } + }) + .context("failed to spawn ros2 spinner")?; + + Ok(Box::new(Ros2Node{ node, executor: self.executor.clone(), })) + } + } + + struct Ros2Node { + node : ros2_client::Node, + executor: std::sync::Arc, + } + + fn qos_default() -> ffi::Ros2QosPolicies { + ffi::Ros2QosPolicies::new(None, None, None, None, None, None, None) + } + + impl ffi::Ros2QosPolicies { + pub fn new( + durability: Option, + liveliness: Option, + reliable: Option, + keep_all: Option, + lease_duration: Option, + max_blocking_time: Option, + keep_last: Option, + ) -> Self { + Self { + durability: durability.unwrap_or(ffi::Ros2Durability::Volatile), + liveliness: liveliness.unwrap_or(ffi::Ros2Liveliness::Automatic), + lease_duration: lease_duration.unwrap_or(f64::INFINITY), + reliable: reliable.unwrap_or(false), + max_blocking_time: max_blocking_time.unwrap_or(0.0), + keep_all: keep_all.unwrap_or(false), + keep_last: keep_last.unwrap_or(1), + } + } + } + + impl From for rustdds::QosPolicies { + fn from(value: ffi::Ros2QosPolicies) -> Self { + rustdds::QosPolicyBuilder::new() + .durability(value.durability.into()) + .liveliness(value.liveliness.convert(value.lease_duration)) + .reliability(if value.reliable { + rustdds::policy::Reliability::Reliable { + max_blocking_time: rustdds::Duration::from_frac_seconds( + value.max_blocking_time, + ), + } + } else { + rustdds::policy::Reliability::BestEffort + }) + .history(if value.keep_all { + rustdds::policy::History::KeepAll + } else { + rustdds::policy::History::KeepLast { + depth: value.keep_last, + } + }) + .build() + } + } + + + + impl From for rustdds::policy::Durability { + fn from(value: ffi::Ros2Durability) -> Self { + match value { + ffi::Ros2Durability::Volatile => rustdds::policy::Durability::Volatile, + ffi::Ros2Durability::TransientLocal => rustdds::policy::Durability::TransientLocal, + ffi::Ros2Durability::Transient => rustdds::policy::Durability::Transient, + ffi::Ros2Durability::Persistent => rustdds::policy::Durability::Persistent, + _ => unreachable!(), // required because enums are represented as integers in bridge + } + } + } + + + impl ffi::Ros2Liveliness { + fn convert(self, lease_duration: f64) -> rustdds::policy::Liveliness { + let lease_duration = if lease_duration.is_infinite() { + rustdds::Duration::INFINITE + } else { + rustdds::Duration::from_frac_seconds(lease_duration) + }; + match self { + ffi::Ros2Liveliness::Automatic => rustdds::policy::Liveliness::Automatic { lease_duration }, + ffi::Ros2Liveliness::ManualByParticipant => { + rustdds::policy::Liveliness::ManualByParticipant { lease_duration } + } + ffi::Ros2Liveliness::ManualByTopic => 
rustdds::policy::Liveliness::ManualByTopic { lease_duration }, + _ => unreachable!(), // required because enums are represented as integers in bridge + } + } + } + }, + ) + } else { + ( + quote! {}, + quote! { + use serde::{Serialize, Deserialize}; + }, + quote! {}, + ) + }; + + quote! { + #attributes + mod ffi { + #imports_and_functions + + #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] + pub struct U16String { + pub chars: Vec, + } + + #(#shared_type_defs)* + #(#service_defs)* + } + + + impl crate::_core::InternalDefault for ffi::U16String { + fn _default() -> Self { + Default::default() + } + } + + impl ffi::U16String { + fn from_str(arg: &str) -> Self { + Self { chars: crate::_core::widestring::U16String::from_str(arg).into_vec()} + } + } + + #(#message_struct_impls)* + + #cxx_bridge_impls + #(#message_topic_impls)* + #(#service_creation_impls)* + + + #(#service_impls)* + + #(#aliases)* + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/parser/action.rs b/libraries/extensions/ros2-bridge/msg-gen/src/parser/action.rs new file mode 100644 index 0000000000000000000000000000000000000000..eb279d664b155340382c3ca21a0df247755443d4 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/parser/action.rs @@ -0,0 +1,108 @@ +use std::{fs, path::Path}; + +use anyhow::{Context, Result}; +use regex::Regex; + +use super::{error::RclMsgError, message::parse_message_string}; +use crate::types::Action; + +const ACTION_GOAL_SUFFIX: &str = "_Goal"; +const ACTION_RESULT_SUFFIX: &str = "_Result"; +const ACTION_FEEDBACK_SUFFIX: &str = "_Feedback"; + +pub fn parse_action_file>(pkg_name: &str, interface_file: P) -> Result { + parse_action_string( + pkg_name, + interface_file + .as_ref() + .file_stem() + .unwrap() + .to_str() + .unwrap(), + fs::read_to_string(interface_file.as_ref())?.as_str(), + ) + .with_context(|| format!("Parse file error: {}", interface_file.as_ref().display())) +} + +fn parse_action_string(pkg_name: &str, action_name: &str, action_string: &str) -> Result { + let re = Regex::new(r"(?m)^---\r?$").unwrap(); + let action_blocks: Vec<_> = re.split(action_string).collect(); + if action_blocks.len() != 3 { + return Err(RclMsgError::InvalidActionSpecification(format!( + "Number of '---' separators nonconformant with action definition (is {}):\n{action_blocks:?}", + action_blocks.len(), + )) + .into()); + } + + Ok(Action { + package: pkg_name.into(), + name: action_name.into(), + goal: parse_message_string( + pkg_name, + &format!("{}{}", action_name, ACTION_GOAL_SUFFIX), + action_blocks[0], + )?, + result: parse_message_string( + pkg_name, + &format!("{}{}", action_name, ACTION_RESULT_SUFFIX), + action_blocks[1], + )?, + feedback: parse_message_string( + pkg_name, + &format!("{}{}", action_name, ACTION_FEEDBACK_SUFFIX), + action_blocks[2], + )?, + }) +} + +#[cfg(test)] +mod test { + use std::path::PathBuf; + + use super::*; + use crate::types::{primitives::*, sequences::*, MemberType}; + + fn parse_action_def(srv_name: &str) -> Result { + let path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join(format!("test_msgs/action/{}.action", srv_name)); + parse_action_file("test_msgs", path) + } + + #[test] + fn parse_fibonacci() -> Result<()> { + let action = parse_action_def("Fibonacci")?; + assert_eq!(action.package, "test_msgs".to_string()); + assert_eq!(action.name, "Fibonacci".to_string()); + + assert_eq!(action.goal.name, "Fibonacci_Goal".to_string()); + assert_eq!(action.goal.members.len(), 1); + assert_eq!(action.goal.members[0].name, 
"order".to_string()); + assert_eq!(action.goal.members[0].r#type, BasicType::I32.into()); + assert_eq!(action.goal.constants.len(), 0); + + assert_eq!(action.result.name, "Fibonacci_Result".to_string()); + assert_eq!(action.result.members.len(), 1); + assert_eq!(action.result.members[0].name, "sequence".to_string()); + assert_eq!( + action.result.members[0].r#type, + MemberType::Sequence(Sequence { + value_type: NestableType::BasicType(BasicType::I32) + }) + ); + assert_eq!(action.result.constants.len(), 0); + + assert_eq!(action.feedback.name, "Fibonacci_Feedback".to_string()); + assert_eq!(action.feedback.members.len(), 1); + assert_eq!(action.feedback.members[0].name, "sequence".to_string()); + assert_eq!( + action.feedback.members[0].r#type, + MemberType::Sequence(Sequence { + value_type: NestableType::BasicType(BasicType::I32) + }) + ); + assert_eq!(action.feedback.constants.len(), 0); + + Ok(()) + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/parser/constant.rs b/libraries/extensions/ros2-bridge/msg-gen/src/parser/constant.rs new file mode 100644 index 0000000000000000000000000000000000000000..08cffdbc33ae655e7545818470fa71a7d3816c6e --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/parser/constant.rs @@ -0,0 +1,85 @@ +use anyhow::{ensure, Result}; +use nom::{ + bytes::complete::is_not, + character::complete::{char, space0, space1}, + combinator::{eof, recognize}, + multi::separated_list1, + sequence::tuple, +}; + +use super::{error::RclMsgError, ident, literal, types}; +use crate::types::{primitives::PrimitiveType, Constant, ConstantType}; + +fn validate_value(r#type: ConstantType, value: &str) -> Result> { + match r#type { + ConstantType::PrimitiveType(t) => match t { + PrimitiveType::BasicType(t) => { + let (rest, value) = literal::get_basic_type_literal_parser(t)(value) + .map_err(|_| RclMsgError::ParseConstantValueError(value.into()))?; + ensure!(rest.is_empty()); + Ok(vec![value]) + } + PrimitiveType::GenericUnboundedString(t) => { + let (rest, default) = literal::get_string_literal_parser(t.into())(value) + .map_err(|_| RclMsgError::ParseDefaultValueError(value.into()))?; + ensure!(rest.is_empty()); + Ok(vec![default]) + } + }, + ConstantType::PrimitiveArray(array_t) => match array_t.value_type { + PrimitiveType::BasicType(t) => { + let (rest, values) = literal::basic_type_sequence(t, value) + .map_err(|_| RclMsgError::ParseDefaultValueError(value.into()))?; + ensure!(rest.is_empty()); + ensure!(values.len() == array_t.size); + + Ok(values) + } + PrimitiveType::GenericUnboundedString(_) => { + let (rest, values) = literal::string_literal_sequence(value) + .map_err(|_| RclMsgError::ParseDefaultValueError(value.into()))?; + ensure!(rest.is_empty()); + Ok(values) + } + }, + } +} + +pub fn constant_def(line: &str) -> Result { + let (_, (r#type, _, name, _, _, _, value, _, _)) = tuple(( + types::parse_constant_type, + space1, + ident::constant_name, + space0, + char('='), + space0, + recognize(separated_list1(space1, is_not(" \t"))), + space0, + eof, + ))(line) + .map_err(|e| RclMsgError::ParseConstantError { + reason: e.to_string(), + input: line.into(), + })?; + + Ok(Constant { + name: name.into(), + r#type: r#type.clone(), + value: validate_value(r#type, value)?, + }) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::types::primitives::BasicType; + + #[test] + fn parse_member_def_with_default() -> Result<()> { + let result = constant_def("int32 AAA=30")?; + assert_eq!(result.name, "AAA"); + assert_eq!(result.r#type, 
BasicType::I32.into()); + assert_eq!(result.value, vec!["30"]); + Ok(()) + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/parser/error.rs b/libraries/extensions/ros2-bridge/msg-gen/src/parser/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..d28a34d618a26acea45592003b1fc2ed67ce262c --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/parser/error.rs @@ -0,0 +1,25 @@ +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum RclMsgError { + #[error("Fail to parse member definition: {reason}\ninput: {input}")] + ParseMemberError { input: String, reason: String }, + + #[error("{0} can not have default value")] + InvalidDefaultError(String), + + #[error("Fail to parse default value: {0}")] + ParseDefaultValueError(String), + + #[error("Fail to parse constant definition: {reason}\ninput: {input}")] + ParseConstantError { input: String, reason: String }, + + #[error("Fail to parse constant value: {0}")] + ParseConstantValueError(String), + + #[error("Invalid service specification: {0}")] + InvalidServiceSpecification(String), + + #[error("Invalid action specification: {0}")] + InvalidActionSpecification(String), +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/parser/ident.rs b/libraries/extensions/ros2-bridge/msg-gen/src/parser/ident.rs new file mode 100644 index 0000000000000000000000000000000000000000..76e59a8fdae057ce38a01e3cf4c731693ca1a323 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/parser/ident.rs @@ -0,0 +1,92 @@ +use nom::{ + branch::alt, + character::complete::{alphanumeric0, char, one_of}, + combinator::{opt, recognize}, + multi::{many1, separated_list0, separated_list1}, + sequence::{pair, tuple}, + IResult, +}; + +fn upperalpha(s: &str) -> IResult<&str, char> { + one_of("ABCDEFGHIJKLMNOPQRSTUVWXYZ")(s) +} + +fn loweralpha(s: &str) -> IResult<&str, char> { + one_of("abcdefghijklmnopqrstuvwxyz")(s) +} + +fn numeric(s: &str) -> IResult<&str, char> { + one_of("0123456789")(s) +} + +pub fn package_name(s: &str) -> IResult<&str, &str> { + recognize(tuple(( + loweralpha, + opt(char('_')), + separated_list1(char('_'), many1(alt((loweralpha, numeric)))), + )))(s) +} + +pub fn member_name(s: &str) -> IResult<&str, &str> { + recognize(tuple(( + loweralpha, + opt(char('_')), + separated_list0(char('_'), many1(alt((loweralpha, numeric)))), + )))(s) +} + +pub fn message_name(s: &str) -> IResult<&str, &str> { + recognize(pair(upperalpha, alphanumeric0))(s) +} + +pub fn constant_name(s: &str) -> IResult<&str, &str> { + recognize(separated_list1( + char('_'), + many1(alt((upperalpha, numeric))), + ))(s) +} + +#[cfg(test)] +mod test { + use anyhow::Result; + + use super::*; + + #[test] + fn parse_member_name() -> Result<()> { + assert_eq!(member_name("abc034_fs3_u3")?.1, "abc034_fs3_u3"); + Ok(()) + } + + #[test] + fn parse_member_name_should_fail_if_starting_with_underscore() { + assert!(member_name("_invalid_identifier").is_err()); + } + + #[test] + fn parse_member_name_should_fail_if_starting_with_number() { + assert!(member_name("0invalid_identifier").is_err()); + } + + #[test] + fn parse_message_name() -> Result<()> { + assert_eq!(message_name("StdMsgs12")?.1, "StdMsgs12"); + Ok(()) + } + + #[test] + fn parse_message_name_should_fail_if_starting_with_wrong_char() { + assert!(message_name("aStdMsgs12").is_err()); + } + + #[test] + fn parse_constant_name() -> Result<()> { + assert_eq!(constant_name("C_O_N_STAN_T")?.1, "C_O_N_STAN_T"); + Ok(()) + } + + #[test] + fn 
parse_constant_name_should_fail_if_starting_with_underscore() { + assert!(constant_name("_C_O_N_STAN_Ta").is_err()); + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/parser/literal.rs b/libraries/extensions/ros2-bridge/msg-gen/src/parser/literal.rs new file mode 100644 index 0000000000000000000000000000000000000000..03ed7450d6288347a28551ec12371ddbb9480e52 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/parser/literal.rs @@ -0,0 +1,295 @@ +use std::convert::TryFrom; + +use nom::{ + branch::alt, + bytes::complete::{is_not, tag, tag_no_case, take_while}, + character::complete::{anychar, char, digit1, hex_digit1, none_of, oct_digit1, one_of, space0}, + combinator::{eof, map, map_res, opt, recognize, rest, value, verify}, + multi::{many0, separated_list1}, + number::complete::recognize_float, + sequence::{delimited, pair, tuple}, + IResult, +}; + +use crate::types::primitives::{BasicType, GenericString}; + +pub fn usize_literal(s: &str) -> IResult<&str, usize> { + map_res(dec_literal, usize::try_from)(s) +} + +fn validate_integer_literal(s: &str) -> IResult<&str, String> +where + T: TryFrom + ToString, +{ + map_res(integer_literal, |v| T::try_from(v).map(|v| v.to_string()))(s) +} + +fn validate_floating_point_literal(s: &str) -> IResult<&str, String> { + map(recognize_float, |v: &str| v.to_string())(s) +} + +fn validate_boolean_literal(s: &str) -> IResult<&str, String> { + map(bool_literal, |v| v.to_string())(s) +} + +pub fn get_basic_type_literal_parser(basic_type: BasicType) -> fn(&str) -> IResult<&str, String> { + match basic_type { + BasicType::U8 | BasicType::Char | BasicType::Byte => validate_integer_literal::, + BasicType::U16 => validate_integer_literal::, + BasicType::U32 => validate_integer_literal::, + BasicType::U64 => validate_integer_literal::, + BasicType::I8 => validate_integer_literal::, + BasicType::I16 => validate_integer_literal::, + BasicType::I32 => validate_integer_literal::, + BasicType::I64 => validate_integer_literal::, + BasicType::F32 | BasicType::F64 => validate_floating_point_literal, + BasicType::Bool => validate_boolean_literal, + } +} + +pub fn basic_type_sequence(basic_type: BasicType, s: &str) -> IResult<&str, Vec> { + delimited( + pair(char('['), space0), + separated_list1( + char(','), + delimited(space0, get_basic_type_literal_parser(basic_type), space0), + ), + pair(space0, char(']')), + )(s) +} + +#[inline] +fn flag_if_exist(s: &str) -> IResult<&str, char> { + map(opt(one_of("+-")), |flag| flag.unwrap_or('+'))(s) +} + +fn dec_literal(s: &str) -> IResult<&str, i128> { + map_res( + tuple((flag_if_exist, separated_list1(char('_'), digit1))), + |(flag, digits)| format!("{}{}", flag, digits.join("")).parse::(), + )(s) +} + +fn integer_literal(s: &str) -> IResult<&str, i128> { + alt(( + map_res( + tuple(( + flag_if_exist, + tag_no_case("0b"), + separated_list1(char('_'), take_while(|c| c == '0' || c == '1')), + )), + |(flag, _, digits)| i128::from_str_radix(&format!("{}{}", flag, digits.join("")), 2), + ), + map_res( + tuple(( + flag_if_exist, + tag_no_case("0o"), + separated_list1(char('_'), oct_digit1), + )), + |(flag, _, digits)| i128::from_str_radix(&format!("{}{}", flag, digits.join("")), 8), + ), + map_res( + tuple(( + flag_if_exist, + tag_no_case("0x"), + separated_list1(char('_'), hex_digit1), + )), + |(flag, _, digits)| i128::from_str_radix(&format!("{}{}", flag, digits.join("")), 16), + ), + dec_literal, + ))(s) +} + +fn bool_literal(s: &str) -> IResult<&str, bool> { + alt(( + value(true, alt((tag("true"), 
tag("1")))), + value(false, alt((tag("false"), tag("0")))), + ))(s) +} + +#[allow(clippy::type_complexity)] +pub fn get_string_literal_parser( + string_type: GenericString, +) -> Box IResult<&str, String>> { + match string_type { + GenericString::String | GenericString::WString => Box::new(string_literal), + GenericString::BoundedString(max_size) | GenericString::BoundedWString(max_size) => { + Box::new(move |s| verify(string_literal, |s: &str| s.len() <= max_size)(s)) + } + } +} + +fn string_literal(s: &str) -> IResult<&str, String> { + alt(( + delimited( + char('"'), + map( + many0(alt(( + value(r#"""#, tag(r#"\""#)), + tag(r#"\"#), + recognize(is_not(r#"\""#)), + ))), + |v| v.join("").trim().to_string(), + ), + char('"'), + ), + delimited( + char('\''), + map( + many0(alt(( + value("'", tag(r#"\'"#)), + tag(r#"\"#), + recognize(is_not(r#"\'"#)), + ))), + |v| v.join("").trim().to_string(), + ), + char('\''), + ), + value("".to_string(), one_of(r#""'"#)), + map( + verify(recognize(many0(anychar)), |v: &str| { + let v = v.trim(); + !(v.starts_with('"') && v.ends_with('"') + || v.starts_with('\'') && v.ends_with('\'')) + }), + |v: &str| v.trim().to_string(), + ), + ))(s) +} + +pub fn string_literal_sequence(s: &str) -> IResult<&str, Vec> { + verify(rest, |v: &str| v.starts_with('[') && v.ends_with(']'))(s)?; + + delimited( + space0, + separated_list1( + char(','), + delimited( + space0, + alt(( + delimited( + char('"'), + map( + many0(alt(( + value(r#"""#, tag(r#"\""#)), + tag(r#"\"#), + recognize(is_not(r#"\""#)), + ))), + |v| v.join("").trim().to_string(), + ), + char('"'), + ), + delimited( + char('\''), + map( + many0(alt(( + value("'", tag(r#"\'"#)), + tag(r#"\"#), + recognize(is_not(r#"\'"#)), + ))), + |v| v.join("").trim().to_string(), + ), + char('\''), + ), + map( + recognize(pair(none_of("\"',"), opt(is_not(",")))), + |s: &str| s.trim().to_string(), + ), + )), + space0, + ), + ), + tuple((opt(char(',')), space0, eof)), + )(s.strip_prefix('[').unwrap().strip_suffix(']').unwrap()) +} + +#[cfg(test)] +mod test { + use anyhow::Result; + + use super::*; + + #[test] + fn parse_integer_literal() -> Result<()> { + assert_eq!(integer_literal("101_010")?.1, 101010); + Ok(()) + } + + #[test] + fn parse_bin_literal() -> Result<()> { + assert_eq!(integer_literal("0b101_010")?.1, 0b101010); + assert_eq!(integer_literal("+0b101_010")?.1, 0b101010); + assert_eq!(integer_literal("-0b101_010")?.1, -0b101010); + Ok(()) + } + + #[test] + fn parse_oct_literal() -> Result<()> { + assert_eq!(integer_literal("0o12_345_670")?.1, 0o12345670); + assert_eq!(integer_literal("+0o12_345_670")?.1, 0o12345670); + assert_eq!(integer_literal("-0o12_345_670")?.1, -0o12345670); + Ok(()) + } + + #[test] + fn parse_dec_literal() -> Result<()> { + assert_eq!(integer_literal("123_456_789")?.1, 123456789); + assert_eq!(integer_literal("+123_456_789")?.1, 123456789); + assert_eq!(integer_literal("-123_456_789")?.1, -123456789); + Ok(()) + } + + #[test] + fn parse_hex_literal() -> Result<()> { + assert_eq!(integer_literal("0x789_aBc")?.1, 0x789abc); + assert_eq!(integer_literal("+0x789_aBc")?.1, 0x789abc); + assert_eq!(integer_literal("-0x789_aBc")?.1, -0x789abc); + Ok(()) + } + + #[test] + fn parse_bool_literal() -> Result<()> { + assert!(bool_literal("true")?.1); + assert!(!bool_literal("false")?.1); + assert!(bool_literal("1")?.1); + assert!(!bool_literal("0")?.1); + Ok(()) + } + + #[test] + fn parse_integer_sequenc() -> Result<()> { + assert_eq!( + basic_type_sequence(BasicType::I8, "[-1, 0x10, 0o10, -0b10]")?.1, + 
vec!["-1", "16", "8", "-2"] + ); + Ok(()) + } + + #[test] + fn parse_string() -> Result<()> { + assert_eq!(string_literal(r#""aaa\"aaa" "#)?.1, r#"aaa"aaa"#); + assert_eq!(string_literal(r#"'aaa\'aaa' "#)?.1, "aaa'aaa"); + Ok(()) + } + + #[test] + fn parse_string_sequence() -> Result<()> { + assert_eq!( + string_literal_sequence(r#"[aaa, "bbb", 'ccc']"#)?.1, + vec!["aaa", "bbb", "ccc"] + ); + assert_eq!( + string_literal_sequence(r#"[aaa, "bbb", 'ccc',]"#)?.1, + vec!["aaa", "bbb", "ccc"] + ); + assert_eq!( + string_literal_sequence(r#"["aaa, \"bbb", 'ccc']"#)?.1, + vec![r#"aaa, "bbb"#, "ccc"] + ); + assert_eq!( + string_literal_sequence(r#"[ aaa , "bbb" , 'ccc' ]"#)?.1, + vec!["aaa", "bbb", "ccc"] + ); + Ok(()) + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/parser/member.rs b/libraries/extensions/ros2-bridge/msg-gen/src/parser/member.rs new file mode 100644 index 0000000000000000000000000000000000000000..c52380e9117c8fba3fb0fd8bae0c6d64859cd537 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/parser/member.rs @@ -0,0 +1,133 @@ +use anyhow::{ensure, Result}; +use nom::{ + bytes::complete::is_not, + character::complete::{space0, space1}, + combinator::{eof, opt, recognize}, + multi::separated_list1, + sequence::{preceded, tuple}, +}; + +use super::{error::RclMsgError, ident, literal, types}; +use crate::types::{primitives::NestableType, Member, MemberType}; + +fn nestable_type_default(nestable_type: NestableType, default: &str) -> Result> { + match nestable_type { + NestableType::BasicType(t) => { + let (rest, default) = literal::get_basic_type_literal_parser(t)(default) + .map_err(|_| RclMsgError::ParseDefaultValueError(default.into()))?; + ensure!(rest.is_empty()); + Ok(vec![default]) + } + NestableType::NamedType(t) => { + Err(RclMsgError::InvalidDefaultError(format!("{}", t)).into()) + } + NestableType::NamespacedType(t) => { + Err(RclMsgError::InvalidDefaultError(format!("{}", t)).into()) + } + NestableType::GenericString(t) => { + let (rest, default) = literal::get_string_literal_parser(t)(default) + .map_err(|_| RclMsgError::ParseDefaultValueError(default.into()))?; + ensure!(rest.is_empty()); + Ok(vec![default]) + } + } +} + +fn array_type_default(value_type: NestableType, default: &str) -> Result> { + match value_type { + NestableType::BasicType(t) => { + let (rest, default) = literal::basic_type_sequence(t, default) + .map_err(|_| RclMsgError::ParseDefaultValueError(default.into()))?; + ensure!(rest.is_empty()); + Ok(default) + } + NestableType::NamedType(t) => { + Err(RclMsgError::InvalidDefaultError(format!("{}", t)).into()) + } + NestableType::NamespacedType(t) => { + Err(RclMsgError::InvalidDefaultError(format!("{}", t)).into()) + } + NestableType::GenericString(_) => { + let (rest, default) = literal::string_literal_sequence(default) + .map_err(|_| RclMsgError::ParseDefaultValueError(default.into()))?; + ensure!(rest.is_empty()); + Ok(default) + } + } +} + +fn validate_default(r#type: MemberType, default: &str) -> Result> { + match r#type { + MemberType::NestableType(t) => nestable_type_default(t, default), + MemberType::Array(t) => { + let default = array_type_default(t.value_type, default)?; + ensure!(default.len() == t.size); + Ok(default) + } + MemberType::Sequence(t) => array_type_default(t.value_type, default), + MemberType::BoundedSequence(t) => { + let default = array_type_default(t.value_type, default)?; + ensure!(default.len() <= t.max_size); + Ok(default) + } + } +} + +pub fn member_def(line: &str) -> Result { + let (_, (r#type, _, 
name, default, _, _)) = tuple(( + types::parse_member_type, + space1, + ident::member_name, + opt(preceded( + space1, + recognize(separated_list1(space1, is_not(" \t"))), + )), + space0, + eof, + ))(line) + .map_err(|e| RclMsgError::ParseMemberError { + input: line.into(), + reason: e.to_string(), + })?; + + Ok(Member { + name: name.into(), + r#type: r#type.clone(), + default: match default { + Some(v) => Some(validate_default(r#type, v)?), + None => None, + }, + }) +} + +#[cfg(test)] +mod test { + use anyhow::Result; + + use super::*; + use crate::types::primitives::BasicType; + + #[test] + fn parse_member_def() -> Result<()> { + let result = member_def("int32 aaa")?; + assert_eq!(result.name, "aaa"); + assert_eq!(result.r#type, BasicType::I32.into()); + Ok(()) + } + + #[test] + fn parse_member_def_with_default() -> Result<()> { + let result = member_def("int32 aaa 30")?; + assert_eq!(result.name, "aaa"); + assert_eq!(result.r#type, BasicType::I32.into()); + assert_eq!(result.default, Some(vec!["30".into()])); + Ok(()) + } + + #[test] + fn parse_member_def_with_invalid_default() -> Result<()> { + assert!(member_def("uint8 aaa -1").is_err()); + assert!(member_def("uint8 aaa 256").is_err()); + Ok(()) + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/parser/message.rs b/libraries/extensions/ros2-bridge/msg-gen/src/parser/message.rs new file mode 100644 index 0000000000000000000000000000000000000000..c961b97322c3d7c2e3bea9fda7742b09a61d9af8 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/parser/message.rs @@ -0,0 +1,160 @@ +use std::{fs, path::Path}; + +use anyhow::{Context, Result}; + +use super::{constant::constant_def, member::member_def}; +use crate::types::Message; + +fn split_once(s: &'_ str, pat: char) -> (&'_ str, Option<&'_ str>) { + let mut items = s.splitn(2, pat); + (items.next().unwrap(), items.next()) +} + +pub fn parse_message_file>(pkg_name: &str, interface_file: P) -> Result { + parse_message_string( + pkg_name, + interface_file + .as_ref() + .file_stem() + .unwrap() + .to_str() + .unwrap(), + fs::read_to_string(interface_file.as_ref())?.as_str(), + ) + .with_context(|| format!("Parse file error: {}", interface_file.as_ref().display())) +} + +pub fn parse_message_string( + pkg_name: &str, + msg_name: &str, + message_string: &str, +) -> Result { + let mut members = vec![]; + let mut constants = vec![]; + + for line in message_string.lines() { + let (line, _) = split_once(line, '#'); + let line = line.trim(); + if line.is_empty() { + continue; + } + + let (_, rest) = split_once(line, ' '); + + match rest.unwrap().find('=') { + Some(_) => constants.push(constant_def(line)?), + None => members.push(member_def(line)?), + } + } + + Ok(Message { + package: pkg_name.into(), + name: msg_name.into(), + members, + constants, + }) +} + +#[cfg(test)] +mod test { + use std::path::PathBuf; + + use super::*; + use crate::types::{primitives::*, sequences::*, *}; + + #[test] + fn test_split_once() { + assert_eq!(split_once("abc", 'b'), ("a", Some("c"))); + assert_eq!(split_once("abc", 'c'), ("ab", Some(""))); + assert_eq!(split_once("abc", 'd'), ("abc", None)); + } + + fn parse_msg_def(msg_name: &str) -> Result { + let path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join(format!("test_msgs/msg/{}.msg", msg_name)); + parse_message_file("test_msgs", path) + } + + #[test] + fn parse_arrays() -> Result<()> { + let message = parse_msg_def("Arrays")?; + + assert_eq!(message.package, "test_msgs".to_string()); + assert_eq!(message.name, "Arrays".to_string()); + 
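        // Illustrative addition (not part of the original fixture-based
        // checks): the `Arrays.msg` fixture is assumed to contain a member
        // line like `bool[3] bool_values`, which can also be parsed inline:
        let inline_msg = parse_message_string("test_msgs", "Inline", "bool[3] bool_values")?;
        assert_eq!(inline_msg.members[0].name, "bool_values".to_string());
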
assert_eq!(message.members[0].name, "bool_values".to_string()); + assert_eq!( + message.members[0].r#type, + MemberType::Array(Array { + value_type: BasicType::Bool.into(), + size: 3, + }) + ); + + Ok(()) + } + + #[test] + fn parse_basic_types() -> Result<()> { + let result = parse_msg_def("BasicTypes")?; + + assert_eq!(result.members[0].name, "bool_value".to_string()); + assert_eq!(result.members[0].r#type, BasicType::Bool.into()); + assert_eq!(result.members[0].default, None); + + Ok(()) + } + + #[test] + fn parse_bounded_sequences() -> Result<()> { + let _result = parse_msg_def("BoundedSequences")?; + Ok(()) + } + + #[test] + fn parse_constants() -> Result<()> { + let _result = parse_msg_def("Constants")?; + Ok(()) + } + + #[test] + fn parse_defaults() -> Result<()> { + let _result = parse_msg_def("Defaults")?; + Ok(()) + } + + #[test] + fn parse_empty() -> Result<()> { + let _result = parse_msg_def("Empty")?; + Ok(()) + } + + #[test] + fn parse_multi_nested() -> Result<()> { + let _result = parse_msg_def("MultiNested")?; + Ok(()) + } + + #[test] + fn parse_nested() -> Result<()> { + let _result = parse_msg_def("Nested")?; + Ok(()) + } + + #[test] + fn parse_strings() -> Result<()> { + let _result = parse_msg_def("Strings")?; + Ok(()) + } + + #[test] + fn parse_unbounded_sequences() -> Result<()> { + let _result = parse_msg_def("UnboundedSequences")?; + Ok(()) + } + + #[test] + fn parse_wstrings() -> Result<()> { + let _result = parse_msg_def("WStrings")?; + Ok(()) + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/parser/mod.rs b/libraries/extensions/ros2-bridge/msg-gen/src/parser/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..787fee1345ce1be07d75db0073d8d2139e10a2f0 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/parser/mod.rs @@ -0,0 +1,12 @@ +pub mod action; +pub mod constant; +pub mod error; +pub mod ident; +pub mod literal; +pub mod member; +pub mod message; +mod package; +pub mod service; +pub mod types; + +pub use package::get_packages; diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/parser/package.rs b/libraries/extensions/ros2-bridge/msg-gen/src/parser/package.rs new file mode 100644 index 0000000000000000000000000000000000000000..8900a722eb88fde3df17b98a9efac451e1ce3741 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/parser/package.rs @@ -0,0 +1,106 @@ +use std::{collections::HashMap, path::Path}; + +use anyhow::{Context, Result}; +use glob::glob; +use tracing::warn; + +use super::{action::parse_action_file, message::parse_message_file, service::parse_service_file}; +use crate::types::Package; + +fn get_ros_msgs_each_package>(root_dir: P) -> Result> { + let mut map: HashMap = HashMap::new(); + + let ros_formats = vec!["msg", "srv", "action"]; + + // Return empty vector if root_dir is empty + if root_dir.as_ref() == Path::new("") { + let empty_vec: Vec = vec![]; + warn!("AMENT_PREFIX_PATH pointed to ''"); + return Ok(empty_vec); + } + + for ros_format in ros_formats { + let pattern = root_dir.as_ref().to_string_lossy().to_string() + + "/**/" + + ros_format + + "/*." + + ros_format; + let mut visited_files = vec![]; + for entry in glob(&pattern).context("Failed to read glob pattern")? { + let path = entry.context("Could not glob given path")?; + let file_name = path + .clone() + .file_name() + .unwrap() + .to_str() + .unwrap() + .to_string(); + + let package = path + .parent() + .context("Should have a msg folder")? + .parent() + .context("should have a package folder")? 
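                // Descriptive note: for a typical ROS 2 layout such as
                // `<prefix>/share/std_msgs/msg/Bool.msg` (hypothetical path),
                // the two `.parent()` calls above step out of the `msg/`
                // directory so that the file name taken below is the package
                // folder, i.e. `std_msgs`.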
+ .file_name() + .context("folder name should exist")? + .to_string_lossy() + .to_string(); + + // Hack + if file_name == "libstatistics_collector" { + continue; + } else if visited_files.contains(&(package.clone(), file_name.clone())) { + warn!( + "found two versions of package: {:?}, message: {:?}. will skip the one in: {:#?}", + package, file_name, path + ); + continue; + } else { + visited_files.push((package.clone(), file_name.clone())); + } + + let p = map + .entry(package.clone()) + .or_insert_with(|| Package::new(package.clone())); + + match ros_format { + "msg" => { + p.messages.push(parse_message_file(&package, path.clone())?); + } + "srv" => { + p.services.push(parse_service_file(&package, path.clone())?); + } + "action" => { + p.actions.push(parse_action_file(&package, path.clone())?); + } + _ => todo!(), + } + } + } + debug_assert!( + !map.is_empty(), + "it seens that no package was generated from your AMENT_PREFIX_PATH directory" + ); + + let packages = map.into_values().collect(); + Ok(packages) +} + +pub fn get_packages
<P>
(paths: &[P]) -> Result> +where + P: AsRef, +{ + let mut packages = paths + .iter() + .map(get_ros_msgs_each_package) + .collect::>>()? + .into_iter() + .flatten() + .filter(|p| !p.is_empty()) + .collect::>(); + + packages.sort_by_key(|p| p.name.clone()); + packages.dedup_by_key(|p| p.name.clone()); + + Ok(packages) +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/parser/service.rs b/libraries/extensions/ros2-bridge/msg-gen/src/parser/service.rs new file mode 100644 index 0000000000000000000000000000000000000000..033b5b10344157e09500d1a255617dee0808e1a4 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/parser/service.rs @@ -0,0 +1,86 @@ +use std::{fs, path::Path}; + +use anyhow::{Context, Result}; +use regex::Regex; + +use super::{error::RclMsgError, message::parse_message_string}; +use crate::types::Service; + +const SERVICE_REQUEST_SUFFIX: &str = "_Request"; +const SERVICE_RESPONSE_SUFFIX: &str = "_Response"; + +pub fn parse_service_file>(pkg_name: &str, interface_file: P) -> Result { + let interface_file = interface_file.as_ref(); + let service_string = fs::read_to_string(interface_file)?.replace("\r\n", "\n"); + + parse_service_string( + pkg_name, + interface_file.file_stem().unwrap().to_str().unwrap(), + &service_string, + ) + .with_context(|| format!("Parse file error: {}", interface_file.display())) +} + +fn parse_service_string(pkg_name: &str, srv_name: &str, service_string: &str) -> Result { + let re = Regex::new(r"(?m)^---$").unwrap(); + let service_blocks: Vec<_> = re.split(service_string).collect(); + if service_blocks.len() != 2 { + return Err(RclMsgError::InvalidServiceSpecification(format!( + "Expect one '---' separator in {}/{} service definition, but get {}", + pkg_name, + srv_name, + service_blocks.len() - 1 + )) + .into()); + } + + Ok(Service { + package: pkg_name.into(), + name: srv_name.into(), + request: parse_message_string( + pkg_name, + &format!("{}{}", srv_name, SERVICE_REQUEST_SUFFIX), + service_blocks[0], + )?, + response: parse_message_string( + pkg_name, + &format!("{}{}", srv_name, SERVICE_RESPONSE_SUFFIX), + service_blocks[1], + )?, + }) +} + +#[cfg(test)] +mod test { + use std::path::PathBuf; + + use super::*; + + fn parse_srv_def(srv_name: &str) -> Result { + let path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join(format!("test_msgs/srv/{}.srv", srv_name)); + parse_service_file("test_msgs", path) + } + + #[test] + fn parse_arrays() -> Result<()> { + let result = parse_srv_def("Arrays")?; + assert_eq!(result.package, "test_msgs".to_string()); + assert_eq!(result.name, "Arrays".to_string()); + assert_eq!(result.request.name, "Arrays_Request".to_string()); + assert_eq!(result.response.name, "Arrays_Response".to_string()); + Ok(()) + } + + #[test] + fn parse_basic_types() -> Result<()> { + let _result = parse_srv_def("BasicTypes")?; + Ok(()) + } + + #[test] + fn parse_empty() -> Result<()> { + let _result = parse_srv_def("Empty")?; + Ok(()) + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/parser/types.rs b/libraries/extensions/ros2-bridge/msg-gen/src/parser/types.rs new file mode 100644 index 0000000000000000000000000000000000000000..4561598c72b2e1572aa73c15cd6d152f0ba36626 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/parser/types.rs @@ -0,0 +1,349 @@ +use anyhow::anyhow; +use nom::{ + branch::alt, + bytes::complete::tag, + character::complete::{char, space1}, + combinator::{eof, map, map_res, opt, peek}, + sequence::{delimited, pair, preceded, tuple}, + IResult, +}; + +use super::{ + 
ident::{message_name, package_name}, + literal::usize_literal, +}; +use crate::types::{ + primitives::*, + sequences::{Array, BoundedSequence, PrimitiveArray, Sequence}, + ConstantType, MemberType, +}; + +pub fn parse_member_type(s: &str) -> IResult<&str, MemberType> { + map_res( + tuple(( + nestable_type, + opt(delimited( + char('['), + pair(opt(tag("<=")), opt(usize_literal)), + char(']'), + )), + peek(alt((space1, eof))), + )), + |(value_type, seq_info, _)| { + Ok(match seq_info { + None => value_type.into(), + Some((None, None)) => Sequence { value_type }.into(), + Some((None, Some(size))) => Array { value_type, size }.into(), + Some((Some(_), Some(size))) => BoundedSequence { + value_type, + max_size: size, + } + .into(), + Some((Some(_), None)) => { + return Err(anyhow!("max_size should be specified")); + } + }) + }, + )(s) +} + +pub fn parse_constant_type(s: &str) -> IResult<&str, ConstantType> { + map( + tuple(( + primitive_type, + opt(delimited(char('['), usize_literal, char(']'))), + peek(alt((space1, eof))), + )), + |(value_type, size, _)| { + size.map_or_else( + || value_type.into(), + |size| PrimitiveArray { value_type, size }.into(), + ) + }, + )(s) +} + +fn basic_type(s: &str) -> IResult<&str, BasicType> { + map( + alt(( + tag("uint8"), + tag("uint16"), + tag("uint32"), + tag("uint64"), + tag("int8"), + tag("int16"), + tag("int32"), + tag("int64"), + tag("int64"), + tag("int64"), + tag("float32"), + tag("float64"), + tag("bool"), + tag("char"), + tag("byte"), + )), + |s| BasicType::parse(s).unwrap(), + )(s) +} + +fn named_type(s: &str) -> IResult<&str, NamedType> { + map(message_name, |name| NamedType(name.into()))(s) +} + +fn namespaced_type(s: &str) -> IResult<&str, NamespacedType> { + map( + tuple((package_name, char('/'), message_name)), + |(package, _, name)| NamespacedType { + package: package.into(), + namespace: "msg".into(), + name: name.into(), + }, + )(s) +} + +fn generic_string(s: &str) -> IResult<&str, GenericString> { + map( + pair( + alt((tag("string"), tag("wstring"))), + opt(preceded(tag("<="), usize_literal)), + ), + |(type_str, array_info)| { + array_info.map_or_else( + || match type_str { + "string" => GenericString::String, + "wstring" => GenericString::WString, + _ => unreachable!(), + }, + |max_size| match type_str { + "string" => GenericString::BoundedString(max_size), + "wstring" => GenericString::BoundedWString(max_size), + _ => unreachable!(), + }, + ) + }, + )(s) +} + +fn generic_unbounded_string(s: &str) -> IResult<&str, GenericUnboundedString> { + map( + alt((tag("string"), tag("wstring"))), + |type_str| match type_str { + "string" => GenericUnboundedString::String, + "wstring" => GenericUnboundedString::WString, + _ => unreachable!(), + }, + )(s) +} + +fn nestable_type(s: &str) -> IResult<&str, NestableType> { + alt(( + map(basic_type, |type_| type_.into()), + map(generic_string, |type_| type_.into()), + map(namespaced_type, |type_| type_.into()), + map(named_type, |type_| type_.into()), + ))(s) +} + +fn primitive_type(s: &str) -> IResult<&str, PrimitiveType> { + alt(( + map(basic_type, |type_| type_.into()), + map(generic_unbounded_string, |type_| type_.into()), + ))(s) +} + +#[cfg(test)] +mod test { + use anyhow::Result; + + use super::*; + + #[test] + fn test_parse_member_type_basic_type() -> Result<()> { + assert_eq!(parse_member_type("int8")?.1, BasicType::I8.into()); + assert_eq!(parse_member_type("int16")?.1, BasicType::I16.into()); + assert_eq!(parse_member_type("int32")?.1, BasicType::I32.into()); + 
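        // Illustrative addition: `parse_member_type` only peeks at the
        // trailing delimiter, so the member name that normally follows the
        // type is left in the unparsed remainder:
        let (rest, parsed) = parse_member_type("int32 aaa")?;
        assert_eq!(parsed, BasicType::I32.into());
        assert_eq!(rest, " aaa");
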
assert_eq!(parse_member_type("int64")?.1, BasicType::I64.into()); + assert_eq!(parse_member_type("uint8")?.1, BasicType::U8.into()); + assert_eq!(parse_member_type("uint16")?.1, BasicType::U16.into()); + assert_eq!(parse_member_type("uint32")?.1, BasicType::U32.into()); + assert_eq!(parse_member_type("uint64")?.1, BasicType::U64.into()); + assert_eq!(parse_member_type("float32")?.1, BasicType::F32.into()); + assert_eq!(parse_member_type("float64")?.1, BasicType::F64.into()); + assert_eq!(parse_member_type("bool")?.1, BasicType::Bool.into()); + assert_eq!(parse_member_type("char")?.1, BasicType::Char.into()); + assert_eq!(parse_member_type("byte")?.1, BasicType::Byte.into()); + Ok(()) + } + + #[test] + fn test_parse_member_type_named_type() -> Result<()> { + assert_eq!(parse_member_type("ABC")?.1, NamedType("ABC".into()).into()); + Ok(()) + } + + #[test] + fn test_parse_member_type_namespaced_type() -> Result<()> { + assert_eq!( + parse_member_type("std_msgs/Bool")?.1, + NamespacedType { + package: "std_msgs".into(), + namespace: "msg".into(), + name: "Bool".into() + } + .into() + ); + Ok(()) + } + + #[test] + fn test_parse_member_type_generic_string() -> Result<()> { + assert_eq!(parse_member_type("string")?.1, GenericString::String.into()); + assert_eq!( + parse_member_type("wstring")?.1, + GenericString::WString.into() + ); + assert_eq!( + parse_member_type("string<=5")?.1, + GenericString::BoundedString(5).into() + ); + assert_eq!( + parse_member_type("wstring<=5")?.1, + GenericString::BoundedWString(5).into() + ); + Ok(()) + } + + #[test] + fn test_parse_member_type_array() -> Result<()> { + assert_eq!( + parse_member_type("string[5]")?.1, + Array { + value_type: GenericString::String.into(), + size: 5, + } + .into() + ); + assert_eq!( + parse_member_type("string<=6[5]")?.1, + Array { + value_type: GenericString::BoundedString(6).into(), + size: 5, + } + .into() + ); + Ok(()) + } + + #[test] + fn test_parse_member_type_sequence() -> Result<()> { + assert_eq!( + parse_member_type("string[]")?.1, + Sequence { + value_type: GenericString::String.into(), + } + .into() + ); + assert_eq!( + parse_member_type("string<=6[]")?.1, + Sequence { + value_type: GenericString::BoundedString(6).into(), + } + .into() + ); + Ok(()) + } + + #[test] + fn test_parse_member_type_bounded_sequence() -> Result<()> { + assert_eq!( + parse_member_type("string[<=5]")?.1, + BoundedSequence { + value_type: GenericString::String.into(), + max_size: 5, + } + .into() + ); + assert_eq!( + parse_member_type("string<=6[<=5]")?.1, + BoundedSequence { + value_type: GenericString::BoundedString(6).into(), + max_size: 5, + } + .into() + ); + Ok(()) + } + + #[test] + fn test_parse_constant_type_basic_type() -> Result<()> { + assert_eq!(parse_constant_type("int8")?.1, BasicType::I8.into()); + assert_eq!(parse_constant_type("int16")?.1, BasicType::I16.into()); + assert_eq!(parse_constant_type("int32")?.1, BasicType::I32.into()); + assert_eq!(parse_constant_type("int64")?.1, BasicType::I64.into()); + assert_eq!(parse_constant_type("uint8")?.1, BasicType::U8.into()); + assert_eq!(parse_constant_type("uint16")?.1, BasicType::U16.into()); + assert_eq!(parse_constant_type("uint32")?.1, BasicType::U32.into()); + assert_eq!(parse_constant_type("uint64")?.1, BasicType::U64.into()); + assert_eq!(parse_constant_type("float32")?.1, BasicType::F32.into()); + assert_eq!(parse_constant_type("float64")?.1, BasicType::F64.into()); + assert_eq!(parse_constant_type("bool")?.1, BasicType::Bool.into()); + assert_eq!(parse_constant_type("char")?.1, 
BasicType::Char.into()); + assert_eq!(parse_constant_type("byte")?.1, BasicType::Byte.into()); + Ok(()) + } + + #[test] + fn test_parse_constant_type_named_type() -> Result<()> { + assert!(parse_constant_type("ABC").is_err()); + Ok(()) + } + + #[test] + fn test_parse_constant_type_namespaced_type() -> Result<()> { + assert!(parse_constant_type("std_msgs/Bool").is_err()); + Ok(()) + } + + #[test] + fn test_parse_constant_type_generic_string() -> Result<()> { + assert_eq!( + parse_constant_type("string")?.1, + GenericUnboundedString::String.into() + ); + assert_eq!( + parse_constant_type("wstring")?.1, + GenericUnboundedString::WString.into() + ); + assert!(parse_constant_type("string<=5").is_err()); + assert!(parse_constant_type("wstring<=5").is_err()); + Ok(()) + } + + #[test] + fn test_parse_constant_type_array() -> Result<()> { + assert_eq!( + parse_constant_type("string[5]")?.1, + PrimitiveArray { + value_type: GenericUnboundedString::String.into(), + size: 5, + } + .into() + ); + assert!(parse_constant_type("string<=6[5]").is_err()); + Ok(()) + } + + #[test] + fn test_parse_constant_type_sequence() -> Result<()> { + assert!(parse_constant_type("string[]").is_err()); + assert!(parse_constant_type("string<=6[]").is_err()); + Ok(()) + } + + #[test] + fn test_parse_const_type_bounded_sequence() -> Result<()> { + assert!(parse_constant_type("string[<=5]").is_err()); + assert!(parse_constant_type("string<=6[<=5]").is_err()); + Ok(()) + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/types/action.rs b/libraries/extensions/ros2-bridge/msg-gen/src/types/action.rs new file mode 100644 index 0000000000000000000000000000000000000000..9338abf845863f239102f9cc94a1378b74c8e24a --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/types/action.rs @@ -0,0 +1,237 @@ +use heck::SnakeCase; +use quote::{format_ident, quote, ToTokens}; + +use super::{primitives::*, Member, Message, Service}; + +/// An action definition +#[derive(Debug, Clone)] +pub struct Action { + /// The name of The package + pub package: String, + /// The name of The action + pub name: String, + /// The type of the goal + pub goal: Message, + /// The type of the result + pub result: Message, + /// The type of the feedback + pub feedback: Message, +} + +impl Action { + fn send_goal_srv(&self) -> Service { + let common = format!("{}_SendGoal", self.name); + + let request = Message { + package: self.package.clone(), + name: format!("{}_Request", common), + members: vec![ + goal_id_type(), + Member { + name: "goal".into(), + r#type: NamespacedType { + package: self.package.clone(), + namespace: "action".into(), + name: format!("{}_Goal", self.name), + } + .into(), + default: None, + }, + ], + constants: vec![], + }; + let response = Message { + package: self.package.clone(), + name: format!("{}_Response", common), + members: vec![ + Member { + name: "accepted".into(), + r#type: BasicType::Bool.into(), + default: None, + }, + Member { + name: "stamp".into(), + r#type: NamespacedType { + package: "builtin_interfaces".into(), + namespace: "msg".into(), + name: "Time".into(), + } + .into(), + default: None, + }, + ], + constants: vec![], + }; + + Service { + package: self.package.clone(), + name: common, + request, + response, + } + } + + fn get_result_srv(&self) -> Service { + let common = format!("{}_GetResult", self.name); + + let request = Message { + package: self.package.clone(), + name: format!("{}_Request", common), + members: vec![goal_id_type()], + constants: vec![], + }; + let response = Message { + package: 
self.package.clone(), + name: format!("{}_Response", common), + members: vec![ + Member { + name: "status".into(), + r#type: BasicType::I8.into(), + default: None, + }, + Member { + name: "result".into(), + r#type: NamespacedType { + package: self.package.clone(), + namespace: "action".into(), + name: format!("{}_Result", self.name), + } + .into(), + default: None, + }, + ], + constants: vec![], + }; + + Service { + package: self.package.clone(), + name: common, + request, + response, + } + } + + fn feedback_message_msg(&self) -> Message { + Message { + package: self.package.clone(), + name: format!("{}_FeedbackMessage", self.name), + members: vec![ + goal_id_type(), + Member { + name: "feedback".into(), + r#type: NamespacedType { + package: self.package.clone(), + namespace: "action".into(), + name: format!("{}_Feedback", self.name), + } + .into(), + default: None, + }, + ], + constants: vec![], + } + } + + pub fn token_stream_with_mod(&self) -> impl ToTokens { + let mod_name = format_ident!("_{}", self.name.to_snake_case()); + let inner = self.token_stream(); + quote! { + pub use #mod_name::*; + mod #mod_name { + #inner + } + } + } + + pub fn token_stream(&self) -> impl ToTokens { + let action_type = format_ident!("{}", self.name); + let goal_type = format_ident!("{}_Goal", self.name); + let result_type = format_ident!("{}_Result", self.name); + let feedback_type = format_ident!("{}_Feedback", self.name); + let send_goal_type = format_ident!("{}_SendGoal", self.name); + let get_result_type = format_ident!("{}_GetResult", self.name); + let feedback_message_type = format_ident!("{}_FeedbackMessage", self.name); + + let goal_body = self.goal.token_stream(); + let result_body = self.result.token_stream(); + let feedback_body = self.feedback.token_stream(); + let send_goal_body = self.send_goal_srv().token_stream(); + let get_result_body = self.get_result_srv().token_stream(); + let feedback_message_body = self.feedback_message_msg().token_stream(); + + quote! 
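+        // Generated action module: re-exports the Goal/Result/Feedback messages and the derived
+        // SendGoal/GetResult services, and ties them together through the ActionT impl below.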
{ + use std::os::raw::c_void; + + pub use self::goal::*; + pub use self::result::*; + pub use self::feedback::*; + pub use self::send_goal::*; + pub use self::get_result::*; + pub use self::feedback_message::*; + + #[allow(non_camel_case_types)] + #[derive(std::fmt::Debug)] + pub struct #action_type; + + + impl crate::_core::ActionT for #action_type { + type Goal = #goal_type; + type Result = #result_type; + type Feedback = #feedback_type; + type SendGoal = #send_goal_type; + type GetResult = #get_result_type; + type FeedbackMessage = #feedback_message_type; + + } + + mod goal { + #goal_body + } // mod goal + + mod result { + #result_body + } // mod result + + mod feedback { + #feedback_body + } // mod feedback + + mod send_goal { + #send_goal_body + } // mod send_goal + + mod get_result { + #get_result_body + } // mod get_result + + mod feedback_message { + #feedback_message_body + } // mod feedback_message + + #[cfg(test)] + mod test { + use super::*; + use crate::_core::ActionT; + + #[test] + fn test_type_support() { + let ptr = #action_type::type_support(); + assert!(!ptr.is_null()); + } + } + } + } +} + +fn goal_id_type() -> Member { + Member { + name: "goal_id".into(), + r#type: NamespacedType { + package: "unique_identifier_msgs".into(), + namespace: "msg".into(), + name: "UUID".into(), + } + .into(), + default: None, + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/types/constant.rs b/libraries/extensions/ros2-bridge/msg-gen/src/types/constant.rs new file mode 100644 index 0000000000000000000000000000000000000000..0db066179a681f2cbe3b3d8ab9f2ef750d3edc9d --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/types/constant.rs @@ -0,0 +1,68 @@ +use quote::{quote, ToTokens}; + +use super::{ + primitives::{BasicType, GenericUnboundedString, PrimitiveType}, + sequences::PrimitiveArray, +}; + +macro_rules! define_enum_from { + ($into_t:ty, $from_t:ty, $path:path) => { + impl From<$from_t> for $into_t { + fn from(t: $from_t) -> Self { + $path(t) + } + } + }; +} + +/// A type which is available for constant +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ConstantType { + PrimitiveType(PrimitiveType), + PrimitiveArray(PrimitiveArray), +} + +impl ConstantType { + pub fn type_tokens(&self) -> impl ToTokens { + match self { + Self::PrimitiveType(t) => { + let token = t.type_tokens(); + quote! { #token } + } + Self::PrimitiveArray(t) => { + let token = t.type_tokens(); + quote! { #token } + } + } + } + + pub fn value_tokens(&self, values: &[String]) -> impl ToTokens { + match self { + Self::PrimitiveType(t) => { + assert_eq!(values.len(), 1); + let token = t.value_tokens(&values[0]); + quote! { #token } + } + Self::PrimitiveArray(t) => { + assert_eq!(values.len(), t.size); + let tokens = values.iter().map(|v| t.value_type.value_tokens(v)); + quote! 
{ [#(#tokens,)*] } + } + } + } +} + +define_enum_from!(ConstantType, PrimitiveType, Self::PrimitiveType); +define_enum_from!(ConstantType, PrimitiveArray, Self::PrimitiveArray); + +impl From for ConstantType { + fn from(t: BasicType) -> Self { + Self::PrimitiveType(PrimitiveType::BasicType(t)) + } +} + +impl From for ConstantType { + fn from(t: GenericUnboundedString) -> Self { + Self::PrimitiveType(PrimitiveType::GenericUnboundedString(t)) + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/types/member.rs b/libraries/extensions/ros2-bridge/msg-gen/src/types/member.rs new file mode 100644 index 0000000000000000000000000000000000000000..c9069a8daed817534d3b25f4c3443489ffc70248 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/types/member.rs @@ -0,0 +1,142 @@ +use quote::{quote, ToTokens}; + +use super::{primitives::*, sequences::*}; + +macro_rules! define_enum_from { + ($into_t:ty, $from_t:ty, $path:path) => { + impl From<$from_t> for $into_t { + fn from(t: $from_t) -> Self { + $path(t) + } + } + }; +} + +/// A type which is available for member +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum MemberType { + NestableType(NestableType), + Array(Array), + Sequence(Sequence), + BoundedSequence(BoundedSequence), +} + +impl MemberType { + pub fn type_tokens(&self, package: &str) -> (impl ToTokens, impl ToTokens) { + match self { + Self::NestableType(t) => { + let token = t.type_tokens(package); + (quote! {}, quote! { #token }) + } + Self::Array(t) => { + let token = t.type_tokens(package); + ( + quote! { #[serde(with = "serde_big_array::BigArray")] }, + quote! { #token }, + ) + } + Self::Sequence(t) => { + let token = t.type_tokens(package); + (quote! {}, quote! { #token }) + } + Self::BoundedSequence(t) => { + let token = t.type_tokens(package); + (quote! {}, quote! { #token }) + } + } + } + + pub fn raw_type_tokens(&self, package: &str) -> impl ToTokens { + match self { + Self::NestableType(t) => { + let token = t.raw_type_tokens(package); + quote! { #token } + } + Self::Array(t) => { + let token = t.raw_type_tokens(package); + quote! { #token } + } + Self::Sequence(t) => { + let token = t.raw_type_tokens(package); + quote! { #token } + } + Self::BoundedSequence(t) => { + let token = t.raw_type_tokens(package); + quote! { #token } + } + } + } + + pub fn raw_ref_type_tokens(&self, package: &str) -> impl ToTokens { + match self { + Self::NestableType(t) => { + let token = t.raw_ref_type_tokens(package); + quote! { #token } + } + Self::Array(t) => { + let token = t.raw_ref_type_tokens(package); + quote! { #token } + } + Self::Sequence(t) => { + let token = t.raw_ref_type_tokens(package); + quote! { #token } + } + Self::BoundedSequence(t) => { + let token = t.raw_ref_type_tokens(package); + quote! { #token } + } + } + } + + pub fn value_tokens(&self, default: &[String]) -> impl ToTokens { + match self { + Self::NestableType(t) => { + let token = t.value_tokens(&default[0]); + quote! { #token } + } + Self::Array(t) => { + assert_eq!(default.len(), t.size); + let tokens = default.iter().map(|v| t.value_type.value_tokens(v)); + quote! { [#(#tokens,)*] } + } + Self::Sequence(t) => { + let tokens = default.iter().map(|v| t.value_type.value_tokens(v)); + quote! { vec![#(#tokens,)*] } + } + Self::BoundedSequence(t) => { + assert!(default.len() <= t.max_size); + let tokens = default.iter().map(|v| t.value_type.value_tokens(v)); + quote! 
{ vec![#(#tokens,)*] } + } + } + } +} + +define_enum_from!(MemberType, NestableType, Self::NestableType); +define_enum_from!(MemberType, Array, Self::Array); +define_enum_from!(MemberType, Sequence, Self::Sequence); +define_enum_from!(MemberType, BoundedSequence, Self::BoundedSequence); + +impl From for MemberType { + fn from(t: BasicType) -> Self { + Self::NestableType(NestableType::BasicType(t)) + } +} + +impl From for MemberType { + fn from(t: NamedType) -> Self { + Self::NestableType(NestableType::NamedType(t)) + } +} + +impl From for MemberType { + fn from(t: NamespacedType) -> Self { + Self::NestableType(NestableType::NamespacedType(t)) + } +} + +impl From for MemberType { + fn from(t: GenericString) -> Self { + Self::NestableType(NestableType::GenericString(t)) + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/types/message.rs b/libraries/extensions/ros2-bridge/msg-gen/src/types/message.rs new file mode 100644 index 0000000000000000000000000000000000000000..8c0510a413bef40a99d174fc1d832bf12fa3dc16 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/types/message.rs @@ -0,0 +1,573 @@ +use quote::{format_ident, quote, ToTokens}; +use syn::Ident; + +use super::{primitives::*, sequences::Array, ConstantType, MemberType}; + +/// A member of a structure +#[derive(Debug, Clone)] +pub struct Member { + /// The name of the member + pub name: String, + /// The type of the member + pub r#type: MemberType, + /// The default value of the member (optional) + pub default: Option>, +} + +impl Member { + fn dummy() -> Self { + Self { + name: "structure_needs_at_least_one_member".into(), + r#type: BasicType::U8.into(), + default: None, + } + } + + fn name_token(&self) -> impl ToTokens { + if RUST_KEYWORDS.contains(&self.name.as_str()) { + format_ident!("{}_", self.name) + } else { + format_ident!("{}", self.name) + } + } + + fn rust_type_def(&self, package: &str) -> impl ToTokens { + let name = self.name_token(); + let (attr, type_) = self.r#type.type_tokens(package); + quote! { #attr pub #name: #type_, } + } + + fn default_value(&self) -> impl ToTokens { + let name = self.name_token(); + self.default.as_ref().map_or_else( + || quote! { #name: crate::_core::InternalDefault::_default(), }, + |default| { + let default = self.r#type.value_tokens(default); + quote! { #name: #default, } + }, + ) + } + + fn raw_type_def(&self, package: &str) -> impl ToTokens { + let name = self.name_token(); + let type_ = self.r#type.raw_type_tokens(package); + quote! { pub #name: #type_, } + } + + fn ffi_to_rust(&self) -> impl ToTokens { + let name = self.name_token(); + let value = match &self.r#type { + MemberType::NestableType(NestableType::BasicType(_)) => quote! { self.#name }, + MemberType::Array(Array { + value_type: NestableType::BasicType(_), + .. + }) => quote! { self.#name.clone() }, + _ => quote! { self.#name.to_rust() }, + }; + + quote! { #name: #value, } + } + + fn raw_ref_type_def(&self, package: &str) -> impl ToTokens { + let name = self.name_token(); + let type_ = self.r#type.raw_ref_type_tokens(package); + quote! { pub #name: #type_, } + } + + fn ffi_from_rust(&self) -> impl ToTokens { + let name = self.name_token(); + let value = match &self.r#type { + MemberType::NestableType(NestableType::BasicType(_)) => quote! { from.#name }, + MemberType::Array(Array { + value_type: NestableType::BasicType(_), + .. + }) => quote! { from.#name.clone() }, + _ => quote! { _FFIFromRust::from_rust(&from.#name) }, + }; + quote! 
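+        // Basic-type members are copied (arrays of them cloned); every other member is converted
+        // into its FFI representation via FFIFromRust::from_rust.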
{ #name: #value, } + } +} + +/// A constant definition +#[derive(Debug, Clone)] +pub struct Constant { + /// The name of the constant + pub name: String, + /// The type of the constant + pub r#type: ConstantType, + /// The value of the constant + pub value: Vec, +} + +impl Constant { + fn token_stream(&self) -> impl ToTokens { + let name = format_ident!("{}", self.name); + let type_ = self.r#type.type_tokens(); + let value = self.r#type.value_tokens(&self.value); + quote! { pub const #name: #type_ = #value; } + } + + fn cxx_method_def_token_stream(&self, struct_name: &str, package_name: &str) -> impl ToTokens { + let name = format_ident!("const_{package_name}__{struct_name}_{}", self.name); + let cxx_name = format_ident!("const_{struct_name}_{}", self.name); + let type_ = self.r#type.type_tokens(); + quote! { + #[namespace = #package_name] + #[cxx_name = #cxx_name] + pub fn #name () -> #type_; + } + } + + fn cxx_method_impl_token_stream(&self, struct_raw_name: &Ident) -> impl ToTokens { + let const_name = format_ident!("{}", self.name); + let name = format_ident!("const_{struct_raw_name}_{}", self.name); + let type_ = self.r#type.type_tokens(); + quote! { + #[allow(non_snake_case, dead_code)] + fn #name () -> #type_ { ffi::#struct_raw_name::#const_name } + } + } +} + +/// A message definition +#[derive(Debug, Clone)] +pub struct Message { + /// The package name + pub package: String, + /// The name of the message + pub name: String, + /// The list of the members + pub members: Vec, + /// The list of the constants + pub constants: Vec, +} + +impl Message { + pub fn struct_token_stream( + &self, + package_name: &str, + gen_cxx_bridge: bool, + ) -> (impl ToTokens, impl ToTokens) { + let cxx_name = format_ident!("{}", self.name); + let struct_raw_name = format_ident!("{package_name}__{}", self.name); + + let rust_type_def_inner = self.members.iter().map(|m| m.rust_type_def(&self.package)); + let constants_def_inner = self.constants.iter().map(|c| c.token_stream()); + let cxx_const_def_inner = self + .constants + .iter() + .map(|c| c.cxx_method_def_token_stream(&self.name, package_name)); + let cxx_const_impl_inner = self + .constants + .iter() + .map(|c| c.cxx_method_impl_token_stream(&struct_raw_name)); + let rust_type_default_inner = self.members.iter().map(|m| m.default_value()); + + let (attributes, cxx_consts) = if gen_cxx_bridge { + let attributes = quote! { + #[namespace = #package_name] + #[cxx_name = #cxx_name] + }; + let consts = quote! { + extern "Rust" { + #(#cxx_const_def_inner)* + } + }; + (attributes, consts) + } else { + (quote! {}, quote! {}) + }; + + let def = if self.members.is_empty() { + quote! { + #[allow(non_camel_case_types)] + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] + #attributes + pub struct #struct_raw_name { + #[serde(skip)] + pub(super) _dummy: u8, + } + + #cxx_consts + } + } else { + quote! { + #[allow(non_camel_case_types)] + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] + #attributes + pub struct #struct_raw_name { + #(#rust_type_def_inner)* + } + + #cxx_consts + } + }; + let default = if self.members.is_empty() { + quote! { + Self { + _dummy: 0, + } + } + } else { + quote! { + Self { + #(#rust_type_default_inner)* + } + } + }; + let impls = quote! 
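+        // Companion impls for the generated struct: associated constants, InternalDefault/Default,
+        // the ros2_client::Message marker, and the cxx constant accessor shims when bridging.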
{ + impl ffi::#struct_raw_name { + #(#constants_def_inner)* + + } + + impl crate::_core::InternalDefault for ffi::#struct_raw_name { + fn _default() -> Self { + #default + } + } + + impl std::default::Default for ffi::#struct_raw_name { + #[inline] + fn default() -> Self { + crate::_core::InternalDefault::_default() + } + } + + impl crate::ros2_client::Message for ffi::#struct_raw_name {} + + #(#cxx_const_impl_inner)* + }; + + (def, impls) + } + + pub fn topic_def(&self, package_name: &str) -> (impl ToTokens, impl ToTokens) { + if self.members.is_empty() { + return (quote! {}, quote! {}); + }; + + let topic_name = format_ident!("Topic__{package_name}__{}", self.name); + let cxx_topic_name = format_ident!("Topic_{}", self.name); + let create_topic = format_ident!("new__Topic__{package_name}__{}", self.name); + let cxx_create_topic = format!("create_topic_{package_name}_{}", self.name); + + let publisher_name = format_ident!("Publisher__{package_name}__{}", self.name); + let cxx_publisher_name = format_ident!("Publisher_{}", self.name); + let create_publisher = format_ident!("new__Publisher__{package_name}__{}", self.name); + let cxx_create_publisher = format_ident!("create_publisher"); + + let struct_raw_name = format_ident!("{package_name}__{}", self.name); + let struct_raw_name_str = struct_raw_name.to_string(); + let self_name = &self.name; + + let publish = format_ident!("publish__{package_name}__{}", self.name); + let cxx_publish = format_ident!("publish"); + + let subscription_name = format_ident!("Subscription__{package_name}__{}", self.name); + let subscription_name_str = subscription_name.to_string(); + let cxx_subscription_name = format_ident!("Subscription_{}", self.name); + let create_subscription = format_ident!("new__Subscription__{package_name}__{}", self.name); + let cxx_create_subscription = format_ident!("create_subscription"); + + let matches = format_ident!("matches__{package_name}__{}", self.name); + let cxx_matches = format_ident!("matches"); + let downcast = format_ident!("downcast__{package_name}__{}", self.name); + let cxx_downcast = format_ident!("downcast"); + + let def = quote! { + #[namespace = #package_name] + #[cxx_name = #cxx_topic_name] + type #topic_name; + #[cxx_name = #cxx_create_topic] + fn #create_topic(self: &Ros2Node, name_space: &str, base_name: &str, qos: Ros2QosPolicies) -> Result>; + #[cxx_name = #cxx_create_publisher] + fn #create_publisher(self: &mut Ros2Node, topic: &Box<#topic_name>, qos: Ros2QosPolicies) -> Result>; + #[cxx_name = #cxx_create_subscription] + fn #create_subscription(self: &mut Ros2Node, topic: &Box<#topic_name>, qos: Ros2QosPolicies, events: &mut CombinedEvents) -> Result>; + + #[namespace = #package_name] + #[cxx_name = #cxx_publisher_name] + type #publisher_name; + #[namespace = #package_name] + #[cxx_name = #cxx_publish] + fn #publish(self: &mut #publisher_name, message: #struct_raw_name) -> Result<()>; + + #[namespace = #package_name] + #[cxx_name = #cxx_subscription_name] + type #subscription_name; + + #[namespace = #package_name] + #[cxx_name = #cxx_matches] + fn #matches(self: &#subscription_name, event: &CombinedEvent) -> bool; + #[namespace = #package_name] + #[cxx_name = #cxx_downcast] + fn #downcast(self: &#subscription_name, event: CombinedEvent) -> Result<#struct_raw_name>; + }; + let imp = quote! 
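+        // Rust-side implementations behind the cxx declarations above: topic creation on Ros2Node,
+        // a Publisher wrapper, and a Subscription whose samples are merged into CombinedEvents.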
{ + #[allow(non_camel_case_types)] + pub struct #topic_name(rustdds::Topic); + + impl Ros2Node { + #[allow(non_snake_case)] + pub fn #create_topic(&self, name_space: &str, base_name: &str, qos: ffi::Ros2QosPolicies) -> eyre::Result> { + let name = crate::ros2_client::Name::new(name_space, base_name).map_err(|e| eyre::eyre!(e))?; + let type_name = crate::ros2_client::MessageTypeName::new(#package_name, #self_name); + let topic = self.node.create_topic(&name, type_name, &qos.into())?; + Ok(Box::new(#topic_name(topic))) + } + + #[allow(non_snake_case)] + pub fn #create_publisher(&mut self, topic: &Box<#topic_name>, qos: ffi::Ros2QosPolicies) -> eyre::Result> { + let publisher = self.node.create_publisher(&topic.0, Some(qos.into()))?; + Ok(Box::new(#publisher_name(publisher))) + } + + #[allow(non_snake_case)] + pub fn #create_subscription(&mut self, topic: &Box<#topic_name>, qos: ffi::Ros2QosPolicies, events: &mut crate::ffi::CombinedEvents) -> eyre::Result> { + let subscription = self.node.create_subscription::(&topic.0, Some(qos.into()))?; + let stream = futures_lite::stream::unfold(subscription, |sub| async { + let item = sub.async_take().await; + let item_boxed: Box = Box::new(item); + Some((item_boxed, sub)) + }); + let id = events.events.merge(Box::pin(stream)); + + Ok(Box::new(#subscription_name { id })) + } + } + + #[allow(non_camel_case_types)] + pub struct #publisher_name(crate::ros2_client::Publisher); + + impl #publisher_name { + #[allow(non_snake_case)] + fn #publish(&mut self, message: ffi::#struct_raw_name) -> eyre::Result<()> { + use eyre::Context; + self.0.publish(message).context("publish failed").map_err(|e| eyre::eyre!("{e:?}")) + } + } + + #[allow(non_camel_case_types)] + pub struct #subscription_name { + id: u32, + } + + impl #subscription_name { + #[allow(non_snake_case)] + fn #matches(&self, event: &crate::ffi::CombinedEvent) -> bool { + match &event.event.as_ref().0 { + Some(crate::MergedEvent::External(event)) if event.id == self.id => true, + _ => false + } + } + #[allow(non_snake_case)] + fn #downcast(&self, event: crate::ffi::CombinedEvent) -> eyre::Result { + use eyre::WrapErr; + + match (*event.event).0 { + Some(crate::MergedEvent::External(event)) if event.id == self.id => { + let result = event.event.downcast::>() + .map_err(|_| eyre::eyre!("downcast to {} failed", #struct_raw_name_str))?; + + let (data, _info) = result.with_context(|| format!("failed to receive {} event", #subscription_name_str)).map_err(|e| eyre::eyre!("{e:?}"))?; + Ok(data) + }, + _ => eyre::bail!("not a {} event", #subscription_name_str), + } + } + } + }; + (def, imp) + } + + pub fn alias_token_stream(&self, package_name: &Ident) -> impl ToTokens { + let cxx_name = format_ident!("{}", self.name); + let struct_raw_name = format_ident!("{package_name}__{}", self.name); + + if self.members.is_empty() { + quote! {} + } else { + quote! { + pub use super::super::ffi::#struct_raw_name as #cxx_name; + } + } + } + + pub fn token_stream(&self) -> impl ToTokens { + self.token_stream_args(false) + } + + pub fn token_stream_args(&self, gen_cxx_bridge: bool) -> impl ToTokens { + let rust_type = format_ident!("{}", self.name); + let raw_type = format_ident!("{}_Raw", self.name); + let raw_ref_type = format_ident!("{}_RawRef", self.name); + + let members_for_c = if self.members.is_empty() { + vec![Member::dummy()] + } else { + self.members.clone() + }; + + let attributes = if gen_cxx_bridge { + let namespace = &self.name; + quote! { #[cxx::bridge(namespace = #namespace)] } + } else { + quote! 
{} + }; + + let rust_type_def_inner = self.members.iter().map(|m| m.rust_type_def(&self.package)); + let constants_def_inner = self.constants.iter().map(|c| c.token_stream()); + let rust_type_default_inner = self.members.iter().map(|m| m.default_value()); + + let raw_type_def_inner = members_for_c.iter().map(|m| m.raw_type_def(&self.package)); + let raw_type_to_rust_inner = self.members.iter().map(|m| m.ffi_to_rust()); + + let raw_ref_type_def_inner = members_for_c + .iter() + .map(|m| m.raw_ref_type_def(&self.package)); + + let raw_ref_type_from_rust_inner = if self.members.is_empty() { + vec![quote! { structure_needs_at_least_one_member: 0, }] + } else { + self.members + .iter() + .map(|m| { + let token = m.ffi_from_rust(); + quote! { #token } + }) + .collect::>() + }; + + quote! { + #[allow(unused_imports)] + use std::convert::TryInto as _; + use std::os::raw::c_void; + + use crate::_core::{ + InternalDefault as _, + FFIFromRust as _FFIFromRust, + FFIToRust as _FFIToRust, + }; + + pub use self::t::#rust_type; + + #attributes + mod t { + #[allow(non_camel_case_types)] + #[derive(std::fmt::Debug, std::clone::Clone, std::cmp::PartialEq, serde::Serialize, serde::Deserialize)] + pub struct #rust_type { + #(#rust_type_def_inner)* + } + } + + impl #rust_type { + #(#constants_def_inner)* + } + + + impl crate::_core::MessageT for #rust_type { + type Raw = #raw_type; + type RawRef = #raw_ref_type; + + + } + + impl crate::_core::InternalDefault for #rust_type { + fn _default() -> Self { + Self { + #(#rust_type_default_inner)* + } + } + } + + impl std::default::Default for #rust_type { + #[inline] + fn default() -> Self { + crate::_core::InternalDefault::_default() + } + } + + + #[allow(non_camel_case_types)] + #[repr(C)] + #[derive(std::fmt::Debug)] + pub struct #raw_type { + #(#raw_type_def_inner)* + } + + impl crate::_core::FFIToRust for #raw_type { + type Target = #rust_type; + + unsafe fn to_rust(&self) -> Self::Target { + Self::Target { + #(#raw_type_to_rust_inner)* + } + } + } + + unsafe impl std::marker::Send for #raw_type {} + unsafe impl std::marker::Sync for #raw_type {} + + #[allow(non_camel_case_types)] + #[doc(hidden)] + #[repr(C)] + #[derive(std::fmt::Debug)] + pub struct #raw_ref_type { + #(#raw_ref_type_def_inner)* + } + + impl crate::_core::FFIFromRust for #raw_ref_type { + type From = #rust_type; + + #[allow(unused_variables)] + unsafe fn from_rust(from: &Self::From) -> Self { + Self { + #(#raw_ref_type_from_rust_inner)* + } + } + } + + #[cfg(test)] + mod test { + use super::*; + use crate::_core::MessageT; + + #[test] + fn test_rust_default() { + let _ = #rust_type::default(); + } + + #[test] + fn test_raw_default() { + let _ = #raw_type::default(); + } + + #[test] + fn test_type_support() { + let ptr = #rust_type::type_support(); + assert!(!ptr.is_null()); + } + } + + } + } +} + +/// Keywords in Rust +/// +/// +const RUST_KEYWORDS: [&str; 51] = [ + // Strict keywords + "as", "break", "const", "continue", "crate", "else", "enum", "extern", "false", "fn", "for", + "if", "impl", "in", "let", "loop", "match", "mod", "move", "mut", "pub", "ref", "return", + "self", "Self", "static", "struct", "super", "trait", "true", "type", "unsafe", "use", "where", + "while", // + // Strict keywords (2018+) + "async", "await", "dyn", // + // Reserved keywords + "abstract", "become", "box", "do", "final", "macro", "override", "priv", "typeof", "unsized", + "virtual", "yield", // + // Reserved keywords (2018+) + "try", +]; diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/types/mod.rs 
b/libraries/extensions/ros2-bridge/msg-gen/src/types/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..7cd7d6a2d8b9949a553f1ee66b06fdca8aee0037 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/types/mod.rs @@ -0,0 +1,15 @@ +mod action; +mod constant; +mod member; +mod message; +mod package; +pub mod primitives; +pub mod sequences; +mod service; + +pub use action::Action; +pub use constant::ConstantType; +pub use member::MemberType; +pub use message::{Constant, Member, Message}; +pub use package::Package; +pub use service::Service; diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/types/package.rs b/libraries/extensions/ros2-bridge/msg-gen/src/types/package.rs new file mode 100644 index 0000000000000000000000000000000000000000..47f3fa19f3d8c4b45663db18130497596be2b726 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/types/package.rs @@ -0,0 +1,141 @@ +use proc_macro2::Span; +use quote::{quote, ToTokens}; +use syn::Ident; + +use crate::types::{Action, Message, Service}; + +#[derive(Debug)] +pub struct Package { + pub name: String, + pub messages: Vec, + pub services: Vec, + pub actions: Vec, +} + +impl Package { + pub const fn new(name: String) -> Self { + Self { + name, + messages: Vec::new(), + services: Vec::new(), + actions: Vec::new(), + } + } + + pub fn is_empty(&self) -> bool { + self.messages.is_empty() && self.services.is_empty() && self.actions.is_empty() + } + + pub fn message_structs(&self, gen_cxx_bridge: bool) -> (impl ToTokens, impl ToTokens) { + if self.messages.is_empty() { + // empty msg + (quote! {}, quote! {}) + } else { + let items = self + .messages + .iter() + .map(|v| v.struct_token_stream(&self.name, gen_cxx_bridge)); + let defs = items.clone().map(|(def, _)| def); + let impls = items.clone().map(|(_, im)| im); + let def_tokens = quote! { + #(#defs)* + }; + let impl_tokens = quote! { + #(#impls)* + }; + (def_tokens, impl_tokens) + } + } + + fn message_aliases(&self, package_name: &Ident) -> impl ToTokens { + if self.messages.is_empty() { + quote! { + // empty msg + } + } else { + let items = self + .messages + .iter() + .map(|v| v.alias_token_stream(package_name)); + quote! { + pub mod msg { + #(#items)* + } + } + } + } + + fn service_aliases(&self, package_name: &Ident) -> impl ToTokens { + if self.services.is_empty() { + quote! { + // empty msg + } + } else { + let items = self + .services + .iter() + .map(|v| v.alias_token_stream(package_name)); + quote! { + pub mod service { + #(#items)* + } + } + } + } + + fn services_block(&self) -> impl ToTokens { + if self.services.is_empty() { + quote! { + // empty srv + } + } else { + let items = self.services.iter().map(|v| v.token_stream_with_mod()); + quote! { + pub mod srv { + #(#items)* + } // srv + } + } + } + + fn actions_block(&self) -> impl ToTokens { + if self.actions.is_empty() { + quote! { + // empty srv + } + } else { + let items = self.actions.iter().map(|v| v.token_stream_with_mod()); + quote! { + pub mod action { + #(#items)* + } // action + } + } + } + + pub fn aliases_token_stream(&self) -> impl ToTokens { + let package_name = Ident::new(&self.name, Span::call_site()); + let aliases = self.message_aliases(&package_name); + let service_aliases = self.service_aliases(&package_name); + + quote! 
{ + pub mod #package_name { + #aliases + #service_aliases + } + } + } + + pub fn token_stream(&self, _gen_cxx_bridge: bool) -> impl ToTokens { + let name = Ident::new(&self.name, Span::call_site()); + let services_block = self.services_block(); + let actions_block = self.actions_block(); + + quote! { + pub mod #name { + #services_block + #actions_block + } + } + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/types/primitives.rs b/libraries/extensions/ros2-bridge/msg-gen/src/types/primitives.rs new file mode 100644 index 0000000000000000000000000000000000000000..c6da532c3f678500b6931ad7772d4efa03aa1216 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/types/primitives.rs @@ -0,0 +1,411 @@ +use std::fmt; + +use proc_macro2::{Ident, Literal, Span}; +use quote::{format_ident, quote, ToTokens}; + +macro_rules! define_enum_from { + ($into_t:ty, $from_t:ty, $path:path) => { + impl From<$from_t> for $into_t { + fn from(t: $from_t) -> Self { + $path(t) + } + } + }; +} + +/// A basic type according to the IDL specification. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum BasicType { + // signed integer type + /// Rust: [i8], C++: `int8_t` + I8, + /// Rust: [i16], C++: `int16_t` + I16, + /// Rust: [i32], C++: `int32_t` + I32, + /// Rust: [i64], C++: `int64_t` + I64, + + // unsigned integer type + /// Rust: [u8], C++: `uint8_t` + U8, + /// Rust: [u16], C++: `uint16_t` + U16, + /// Rust: [u32], C++: `uint32_t` + U32, + /// Rust: [u64], C++: `uint64_t` + U64, + + // floating point type + /// Rust: [f32], C++: `float` + F32, + /// Rust: [f64], C++: `double` + F64, + // long double is not supported + + // boolean type + /// Rust: [bool], C++: `bool` + Bool, + + // duplicated type + /// Rust: [u8], C++: `unsigned char` + Char, + /// Rust: [u8], C++: `unsigned char` + Byte, +} + +impl BasicType { + pub fn parse(s: &str) -> Option { + Some(match s { + "int8" => Self::I8, + "int16" => Self::I16, + "int32" => Self::I32, + "int64" => Self::I64, + "uint8" => Self::U8, + "uint16" => Self::U16, + "uint32" => Self::U32, + "uint64" => Self::U64, + "float32" => Self::F32, + "float64" => Self::F64, + "bool" => Self::Bool, + "char" => Self::Char, + "byte" => Self::Byte, + _ => { + return None; + } + }) + } + + pub fn type_tokens(self) -> impl ToTokens { + match self { + Self::I8 => quote! { i8 }, + Self::I16 => quote! { i16 }, + Self::I32 => quote! { i32 }, + Self::I64 => quote! { i64 }, + Self::U8 | Self::Char | Self::Byte => quote! { u8 }, + Self::U16 => quote! { u16 }, + Self::U32 => quote! { u32 }, + Self::U64 => quote! { u64 }, + Self::F32 => quote! { f32 }, + Self::F64 => quote! { f64 }, + Self::Bool => quote! { bool }, + } + } + + fn value_literal(self, value: &str) -> Option { + Some(match self { + Self::I8 => Literal::i8_suffixed(value.parse().unwrap()), + Self::I16 => Literal::i16_suffixed(value.parse().unwrap()), + Self::I32 => Literal::i32_suffixed(value.parse().unwrap()), + Self::I64 => Literal::i64_suffixed(value.parse().unwrap()), + Self::U8 | Self::Char | Self::Byte => Literal::u8_suffixed(value.parse().unwrap()), + Self::U16 => Literal::u16_suffixed(value.parse().unwrap()), + Self::U32 => Literal::u32_suffixed(value.parse().unwrap()), + Self::U64 => Literal::u64_suffixed(value.parse().unwrap()), + Self::F32 => Literal::f32_suffixed(value.parse().unwrap()), + Self::F64 => Literal::f64_suffixed(value.parse().unwrap()), + // bool is Ident not Literal! 
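+            // `true` / `false` are parsed as identifiers rather than literals, so boolean values
+            // are emitted directly in value_tokens below.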
+ Self::Bool => return None, + }) + } + + fn value_tokens(self, value: &str) -> impl ToTokens { + match self { + Self::Bool => match value { + "true" => quote! { true }, + "false" => quote! { false }, + _ => unreachable!(), + }, + _ => { + let value = self.value_literal(value).unwrap(); + quote! { #value } + } + } + } +} + +/// A type identified by the name +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct NamedType(pub String); + +impl NamedType { + fn type_tokens(&self, package: &str) -> impl ToTokens { + let package = Ident::new(package, Span::call_site()); + let name = Ident::new(&self.0, Span::call_site()); + let ident = format_ident!("{package}__{name}"); + quote! { #ident } + } + + fn raw_type_tokens(&self, package: &str) -> impl ToTokens { + let package = Ident::new(package, Span::call_site()); + let namespace = Ident::new("msg", Span::call_site()); + let name = format_ident!("{}_Raw", self.0); + quote! { crate::#package::#namespace::#name } + } + + fn raw_ref_type_tokens(&self, package: &str) -> impl ToTokens { + let package = Ident::new(package, Span::call_site()); + let namespace = Ident::new("msg", Span::call_site()); + let name = format_ident!("{}_RawRef", self.0); + quote! { crate::#package::#namespace::#name } + } +} + +impl fmt::Display for NamedType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// A type identified by a name in a namespaced scope +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct NamespacedType { + /// A package name which this type belongs to + /// e.g. `std_msgs` + pub package: String, + /// msg or action + pub namespace: String, + /// A name of message + /// e.g. `Bool` + pub name: String, +} + +impl NamespacedType { + fn type_tokens(&self) -> impl ToTokens { + let package = Ident::new(&self.package, Span::call_site()); + let name = Ident::new(&self.name, Span::call_site()); + let ident = format_ident!("{package}__{name}"); + quote! { #ident } + } + + fn raw_type_tokens(&self) -> impl ToTokens { + let package = Ident::new(&self.package, Span::call_site()); + let namespace = Ident::new(&self.namespace, Span::call_site()); + let name = format_ident!("{}_Raw", self.name); + quote! { crate::#package::#namespace::#name } + } + + fn raw_ref_type_tokens(&self) -> impl ToTokens { + let package = Ident::new(&self.package, Span::call_site()); + let namespace = Ident::new(&self.namespace, Span::call_site()); + let name = format_ident!("{}_RawRef", self.name); + quote! { crate::#package::#namespace::#name } + } +} + +impl fmt::Display for NamespacedType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/{}/{}", self.package, self.namespace, self.name) + } +} + +/// A string type +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum GenericString { + String, + WString, + BoundedString(usize), + BoundedWString(usize), +} + +impl GenericString { + const fn is_wide(self) -> bool { + matches!(self, Self::WString | Self::BoundedWString(_)) + } + + fn type_tokens(self) -> impl ToTokens { + if self.is_wide() { + quote! { U16String } + } else { + quote! { String } + } + } + + fn raw_type_tokens(self) -> impl ToTokens { + if self.is_wide() { + quote! { crate::_core::FFIWString } + } else { + quote! { crate::_core::FFIString } + } + } + + fn raw_ref_type_tokens(self) -> impl ToTokens { + if self.is_wide() { + quote! { crate::_core::OwnedFFIWString } + } else { + quote! 
{ crate::_core::OwnedFFIString } + } + } + + fn value_tokens(self, value: &str) -> impl ToTokens { + // TODO: Assertion + let value = Literal::string(value); + if self.is_wide() { + quote! { ffi::U16String::from_str(#value) } + } else { + quote! { ::std::string::String::from(#value) } + } + } +} + +impl From for GenericString { + fn from(t: GenericUnboundedString) -> Self { + match t { + GenericUnboundedString::String => Self::String, + GenericUnboundedString::WString => Self::WString, + } + } +} + +/// A generic unbounded string type +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum GenericUnboundedString { + String, + WString, +} + +impl GenericUnboundedString { + fn type_tokens(self) -> impl ToTokens { + quote! { &'static str } + } + + fn value_tokens(self, value: &str) -> impl ToTokens { + let value = Literal::string(value); + quote! { #value } + } +} + +/// A type which can be used inside nested types +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum NestableType { + BasicType(BasicType), + NamedType(NamedType), + NamespacedType(NamespacedType), + GenericString(GenericString), +} + +impl NestableType { + pub fn type_tokens(&self, package: &str) -> impl ToTokens { + match self { + Self::BasicType(t) => { + let token = t.type_tokens(); + quote! { #token } + } + Self::NamedType(t) => { + let token = t.type_tokens(package); + quote! { #token } + } + Self::NamespacedType(t) => { + let token = t.type_tokens(); + quote! { #token } + } + Self::GenericString(t) => { + let token = t.type_tokens(); + quote! { #token } + } + } + } + + pub fn raw_type_tokens(&self, package: &str) -> impl ToTokens { + match self { + Self::BasicType(t) => { + let token = t.type_tokens(); + quote! { #token } + } + Self::NamedType(t) => { + let token = t.raw_type_tokens(package); + quote! { #token } + } + Self::NamespacedType(t) => { + let token = t.raw_type_tokens(); + quote! { #token } + } + Self::GenericString(t) => { + let token = t.raw_type_tokens(); + quote! { #token } + } + } + } + + pub fn raw_ref_type_tokens(&self, package: &str) -> impl ToTokens { + match self { + Self::BasicType(t) => { + let token = t.type_tokens(); + quote! { #token } + } + Self::NamedType(t) => { + let token = t.raw_ref_type_tokens(package); + quote! { #token } + } + Self::NamespacedType(t) => { + let token = t.raw_ref_type_tokens(); + quote! { #token } + } + Self::GenericString(t) => { + let token = t.raw_ref_type_tokens(); + quote! { #token } + } + } + } + + pub fn value_tokens(&self, default: &str) -> impl ToTokens { + match self { + Self::BasicType(t) => { + let token = t.value_tokens(default); + quote! { #token } + } + Self::GenericString(t) => { + let token = t.value_tokens(default); + quote! { #token } + } + _ => unreachable!(), + } + } +} + +define_enum_from!(NestableType, BasicType, Self::BasicType); +define_enum_from!(NestableType, NamedType, Self::NamedType); +define_enum_from!(NestableType, NamespacedType, Self::NamespacedType); +define_enum_from!(NestableType, GenericString, Self::GenericString); + +/// A primitive type which can be used for constant +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PrimitiveType { + BasicType(BasicType), + GenericUnboundedString(GenericUnboundedString), +} + +impl PrimitiveType { + pub fn type_tokens(self) -> impl ToTokens { + match self { + Self::BasicType(t) => { + let token = t.type_tokens(); + quote! { #token } + } + Self::GenericUnboundedString(t) => { + let token = t.type_tokens(); + quote! 
{ #token } + } + } + } + + pub fn value_tokens(self, value: &str) -> impl ToTokens { + match self { + Self::BasicType(t) => { + let token = t.value_tokens(value); + quote! { #token } + } + Self::GenericUnboundedString(t) => { + let token = t.value_tokens(value); + quote! { #token } + } + } + } +} + +define_enum_from!(PrimitiveType, BasicType, Self::BasicType); +define_enum_from!( + PrimitiveType, + GenericUnboundedString, + Self::GenericUnboundedString +); diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/types/sequences.rs b/libraries/extensions/ros2-bridge/msg-gen/src/types/sequences.rs new file mode 100644 index 0000000000000000000000000000000000000000..2c4c187202c7fa9b161fcb501789e41ef4b448f0 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/types/sequences.rs @@ -0,0 +1,109 @@ +use quote::{quote, ToTokens}; + +use super::primitives::*; + +/// An array type with a static size +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Array { + /// The type of the elements + pub value_type: NestableType, + /// The number of elements in the array + pub size: usize, +} + +impl Array { + pub fn type_tokens(&self, package: &str) -> impl ToTokens { + let inner_type = self.value_type.type_tokens(package); + let size = self.size; + quote! { [#inner_type; #size] } + } + + pub fn raw_type_tokens(&self, package: &str) -> impl ToTokens { + let inner_type = self.value_type.raw_type_tokens(package); + let size = self.size; + quote! { [#inner_type; #size] } + } + + pub fn raw_ref_type_tokens(&self, package: &str) -> impl ToTokens { + let inner_type = self.value_type.raw_ref_type_tokens(package); + let size = self.size; + quote! { [#inner_type; #size] } + } +} + +/// A sequence type with an unlimited number of elements +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Sequence { + /// The type of the elements + pub value_type: NestableType, +} + +impl Sequence { + pub fn type_tokens(&self, package: &str) -> impl ToTokens { + let inner_type = self.value_type.type_tokens(package); + quote! { Vec<#inner_type> } + } + + pub fn raw_type_tokens(&self, package: &str) -> impl ToTokens { + let inner_type = self.value_type.raw_type_tokens(package); + quote! { crate::_core::FFISeq<#inner_type> } + } + + pub fn raw_ref_type_tokens(&self, package: &str) -> impl ToTokens { + let inner_type = self.value_type.raw_ref_type_tokens(package); + match self.value_type { + NestableType::BasicType(_) => { + quote! { crate::_core::RefFFISeq<#inner_type> } + } + _ => quote! { crate::_core::OwnedFFISeq<#inner_type> }, + } + } +} + +/// A sequence type with a maximum number of elements +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BoundedSequence { + /// The type of the elements + pub value_type: NestableType, + /// The maximum number of elements in the sequence + pub max_size: usize, +} + +impl BoundedSequence { + pub fn type_tokens(&self, package: &str) -> impl ToTokens { + let inner_type = self.value_type.type_tokens(package); + quote! { Vec<#inner_type> } + } + + pub fn raw_type_tokens(&self, package: &str) -> impl ToTokens { + let inner_type = self.value_type.raw_type_tokens(package); + quote! { crate::_core::FFISeq<#inner_type> } + } + + pub fn raw_ref_type_tokens(&self, package: &str) -> impl ToTokens { + let inner_type = self.value_type.raw_ref_type_tokens(package); + match self.value_type { + NestableType::BasicType(_) => { + quote! { crate::_core::RefFFISeq<#inner_type> } + } + _ => quote! 
{ crate::_core::OwnedFFISeq<#inner_type> }, + } + } +} + +/// An array type of a primitive type +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PrimitiveArray { + /// The type of the elements + pub value_type: PrimitiveType, + /// The number of elements in the array + pub size: usize, +} + +impl PrimitiveArray { + pub fn type_tokens(&self) -> impl ToTokens { + let inner_type = self.value_type.type_tokens(); + let size = self.size; + quote! { [#inner_type; #size] } + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/src/types/service.rs b/libraries/extensions/ros2-bridge/msg-gen/src/types/service.rs new file mode 100644 index 0000000000000000000000000000000000000000..531dc76207e1580caf7af8eb9e9b2115449bb2ca --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/src/types/service.rs @@ -0,0 +1,287 @@ +use heck::SnakeCase; +use quote::{format_ident, quote, ToTokens}; +use syn::Ident; + +use super::Message; + +/// A service definition +#[derive(Debug, Clone)] +pub struct Service { + /// The name of The package + pub package: String, + /// The name of the service + pub name: String, + /// The type of the request + pub request: Message, + /// The type of the response + pub response: Message, +} + +impl Service { + pub fn struct_token_stream( + &self, + package_name: &str, + gen_cxx_bridge: bool, + ) -> (impl ToTokens, impl ToTokens) { + let (request_def, request_impl) = self + .request + .struct_token_stream(package_name, gen_cxx_bridge); + let (response_def, response_impl) = self + .response + .struct_token_stream(package_name, gen_cxx_bridge); + + let def = quote! { + #request_def + #response_def + }; + + let impls = quote! { + #request_impl + #response_impl + }; + + (def, impls) + } + + pub fn alias_token_stream(&self, package_name: &Ident) -> impl ToTokens { + let srv_type = format_ident!("{}", self.name); + let req_type_raw = format_ident!("{package_name}__{}_Request", self.name); + let res_type_raw = format_ident!("{package_name}__{}_Response", self.name); + + let req_type = format_ident!("{}Request", self.name); + let res_type = format_ident!("{}Response", self.name); + + let request_type_name = req_type.to_string(); + let response_type_name = res_type.to_string(); + + quote! 
{ + #[allow(non_camel_case_types)] + #[derive(std::fmt::Debug)] + pub struct #srv_type; + + impl crate::ros2_client::Service for #srv_type { + type Request = #req_type; + type Response = #res_type; + + fn request_type_name(&self) -> &str { + #request_type_name + } + fn response_type_name(&self) -> &str { + #response_type_name + } + } + + pub use super::super::ffi::#req_type_raw as #req_type; + pub use super::super::ffi::#res_type_raw as #res_type; + } + } + + pub fn cxx_service_creation_functions( + &self, + package_name: &str, + ) -> (impl ToTokens, impl ToTokens) { + let client_name = format_ident!("Client__{package_name}__{}", self.name); + let cxx_client_name = format_ident!("Client_{}", self.name); + let create_client = format_ident!("new_Client__{package_name}__{}", self.name); + let cxx_create_client = format!("create_client_{package_name}_{}", self.name); + + let package = format_ident!("{package_name}"); + let self_name = format_ident!("{}", self.name); + let self_name_str = &self.name; + + let wait_for_service = format_ident!("wait_for_service__{package_name}__{}", self.name); + let cxx_wait_for_service = format_ident!("wait_for_service"); + let send_request = format_ident!("send_request__{package_name}__{}", self.name); + let cxx_send_request = format_ident!("send_request"); + let req_type_raw = format_ident!("{package_name}__{}_Request", self.name); + let res_type_raw = format_ident!("{package_name}__{}_Response", self.name); + let res_type_raw_str = res_type_raw.to_string(); + + let matches = format_ident!("matches__{package_name}__{}", self.name); + let cxx_matches = format_ident!("matches"); + let downcast = format_ident!("downcast__{package_name}__{}", self.name); + let cxx_downcast = format_ident!("downcast"); + + let def = quote! { + #[namespace = #package_name] + #[cxx_name = #cxx_client_name] + type #client_name; + // TODO: add `merged_streams` argument (for sending replies) + #[cxx_name = #cxx_create_client] + fn #create_client(self: &mut Ros2Node, name_space: &str, base_name: &str, qos: Ros2QosPolicies, events: &mut CombinedEvents) -> Result>; + + #[namespace = #package_name] + #[cxx_name = #cxx_wait_for_service] + fn #wait_for_service(self: &mut #client_name, node: &Box) -> Result<()>; + #[namespace = #package_name] + #[cxx_name = #cxx_send_request] + fn #send_request(self: &mut #client_name, request: #req_type_raw) -> Result<()>; + #[namespace = #package_name] + #[cxx_name = #cxx_matches] + fn #matches(self: &#client_name, event: &CombinedEvent) -> bool; + #[namespace = #package_name] + #[cxx_name = #cxx_downcast] + fn #downcast(self: &#client_name, event: CombinedEvent) -> Result<#res_type_raw>; + }; + let imp = quote! 
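+        // Client-side implementation: the generated create_client function builds a ros2_client
+        // service client plus a flume channel whose responses are merged into CombinedEvents.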
{ + impl Ros2Node { + #[allow(non_snake_case)] + pub fn #create_client(&mut self, name_space: &str, base_name: &str, qos: ffi::Ros2QosPolicies, events: &mut crate::ffi::CombinedEvents) -> eyre::Result> { + use futures::StreamExt as _; + + let client = self.node.create_client::< #package :: service :: #self_name >( + ros2_client::ServiceMapping::Enhanced, + &ros2_client::Name::new(name_space, base_name).unwrap(), + &ros2_client::ServiceTypeName::new(#package_name, #self_name_str), + qos.clone().into(), + qos.into(), + ).map_err(|e| eyre::eyre!("{e:?}"))?; + let (response_tx, response_rx) = flume::bounded(1); + let stream = response_rx.into_stream().map(|v: eyre::Result<_>| Box::new(v) as Box); + let id = events.events.merge(Box::pin(stream)); + + Ok(Box::new(#client_name { + client: std::sync::Arc::new(client), + response_tx: std::sync::Arc::new(response_tx), + executor: self.executor.clone(), + stream_id: id, + })) + } + } + + #[allow(non_camel_case_types)] + pub struct #client_name { + client: std::sync::Arc>, + response_tx: std::sync::Arc>>, + executor: std::sync::Arc, + stream_id: u32, + } + + impl #client_name { + #[allow(non_snake_case)] + fn #wait_for_service(self: &mut #client_name, node: &Box) -> eyre::Result<()> { + let service_ready = async { + for _ in 0..10 { + let ready = self.client.wait_for_service(&node.node); + futures::pin_mut!(ready); + let timeout = futures_timer::Delay::new(std::time::Duration::from_secs(2)); + match futures::future::select(ready, timeout).await { + futures::future::Either::Left(((), _)) => { + return Ok(()); + } + futures::future::Either::Right(_) => { + eprintln!("timeout while waiting for service, retrying"); + } + } + } + eyre::bail!("service not available"); + }; + futures::executor::block_on(service_ready)?; + Ok(()) + } + + #[allow(non_snake_case)] + fn #send_request(&mut self, request: ffi::#req_type_raw) -> eyre::Result<()> { + use eyre::WrapErr; + use futures::task::SpawnExt as _; + + let request_id = futures::executor::block_on(self.client.async_send_request(request.clone())) + .context("failed to send request") + .map_err(|e| eyre::eyre!("{e:?}"))?; + let client = self.client.clone(); + let response_tx = self.response_tx.clone(); + let send_result = async move { + let response = client.async_receive_response(request_id).await.with_context(|| format!("failed to receive response for request {request_id:?}")); + if response_tx.send_async(response).await.is_err() { + tracing::warn!("failed to send service response"); + } + }; + self.executor.spawn(send_result).context("failed to spawn response task").map_err(|e| eyre::eyre!("{e:?}"))?; + Ok(()) + } + + #[allow(non_snake_case)] + fn #matches(&self, event: &crate::ffi::CombinedEvent) -> bool { + match &event.event.as_ref().0 { + Some(crate::MergedEvent::External(event)) if event.id == self.stream_id => true, + _ => false + } + } + #[allow(non_snake_case)] + fn #downcast(&self, event: crate::ffi::CombinedEvent) -> eyre::Result { + use eyre::WrapErr; + + match (*event.event).0 { + Some(crate::MergedEvent::External(event)) if event.id == self.stream_id => { + let result = event.event.downcast::>() + .map_err(|_| eyre::eyre!("downcast to {} failed", #res_type_raw_str))?; + + let data = result.with_context(|| format!("failed to receive {} response", #self_name_str)) + .map_err(|e| eyre::eyre!("{e:?}"))?; + Ok(data) + }, + _ => eyre::bail!("not a {} response event", #self_name_str), + } + } + } + }; + (def, imp) + } + + pub fn token_stream_with_mod(&self) -> impl ToTokens { + let mod_name = 
format_ident!("_{}", self.name.to_snake_case()); + let inner = self.token_stream(); + quote! { + pub use #mod_name::*; + mod #mod_name { + #inner + } + } + } + + pub fn token_stream(&self) -> impl ToTokens { + let srv_type = format_ident!("{}", self.name); + let req_type = format_ident!("{}_Request", self.name); + let res_type = format_ident!("{}_Response", self.name); + + let request_body = self.request.token_stream(); + let response_body = self.response.token_stream(); + + quote! { + use std::os::raw::c_void; + + pub use self::request::*; + pub use self::response::*; + + #[allow(non_camel_case_types)] + #[derive(std::fmt::Debug)] + pub struct #srv_type; + + + impl crate::_core::ServiceT for #srv_type { + type Request = #req_type; + type Response = #res_type; + } + + mod request { + #request_body + } // mod request + + mod response { + #response_body + } // mod response + + #[cfg(test)] + mod test { + use super::*; + use crate::_core::ServiceT; + + #[test] + fn test_type_support() { + let ptr = #srv_type::type_support(); + assert!(!ptr.is_null()); + } + } + } + } +} diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/action/Fibonacci.action b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/action/Fibonacci.action new file mode 100644 index 0000000000000000000000000000000000000000..b2591f28ede10034f653827a06b028efec82f250 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/action/Fibonacci.action @@ -0,0 +1,8 @@ +#goal definition +int32 order +--- +#result definition +int32[] sequence +--- +#feedback +int32[] sequence diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Arrays.msg b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Arrays.msg new file mode 100644 index 0000000000000000000000000000000000000000..4e2d280b490a8ead3c2ca2eca9d9edc75658aa04 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Arrays.msg @@ -0,0 +1,34 @@ +# Arrays of different types +bool[3] bool_values +byte[3] byte_values +char[3] char_values +float32[3] float32_values +float64[3] float64_values +int8[3] int8_values +uint8[3] uint8_values +int16[3] int16_values +uint16[3] uint16_values +int32[3] int32_values +uint32[3] uint32_values +int64[3] int64_values +uint64[3] uint64_values +string[3] string_values +BasicTypes[3] basic_types_values +Constants[3] constants_values +Defaults[3] defaults_values +bool[3] bool_values_default [false, true, false] +byte[3] byte_values_default [0, 1, 255] +char[3] char_values_default [0, 1, 127] +float32[3] float32_values_default [1.125, 0.0, -1.125] +float64[3] float64_values_default [3.1415, 0.0, -3.1415] +int8[3] int8_values_default [0, 127, -128] +uint8[3] uint8_values_default [0, 1, 255] +int16[3] int16_values_default [0, 32767, -32768] +uint16[3] uint16_values_default [0, 1, 65535] +int32[3] int32_values_default [0, 2147483647, -2147483648] +uint32[3] uint32_values_default [0, 1, 4294967295] +int64[3] int64_values_default [0, 9223372036854775807, -9223372036854775808] +uint64[3] uint64_values_default [0, 1, 18446744073709551615] +string[3] string_values_default ["", "max value", "min value"] +# Regression test: check alignment of basic field after an array field is correct +int32 alignment_check diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/BasicTypes.msg b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/BasicTypes.msg new file mode 100644 index 0000000000000000000000000000000000000000..eaf9b9ef34d8a0ed535932ac88f2590ac71573e8 --- /dev/null +++ 
b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/BasicTypes.msg @@ -0,0 +1,13 @@ +bool bool_value +byte byte_value +char char_value +float32 float32_value +float64 float64_value +int8 int8_value +uint8 uint8_value +int16 int16_value +uint16 uint16_value +int32 int32_value +uint32 uint32_value +int64 int64_value +uint64 uint64_value diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/BoundedSequences.msg b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/BoundedSequences.msg new file mode 100644 index 0000000000000000000000000000000000000000..e369ffc394d596f0d7903fff1899ea4a967391ff --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/BoundedSequences.msg @@ -0,0 +1,34 @@ +# Bounded sequences of different types +bool[<=3] bool_values +byte[<=3] byte_values +char[<=3] char_values +float32[<=3] float32_values +float64[<=3] float64_values +int8[<=3] int8_values +uint8[<=3] uint8_values +int16[<=3] int16_values +uint16[<=3] uint16_values +int32[<=3] int32_values +uint32[<=3] uint32_values +int64[<=3] int64_values +uint64[<=3] uint64_values +string[<=3] string_values +BasicTypes[<=3] basic_types_values +Constants[<=3] constants_values +Defaults[<=3] defaults_values +bool[<=3] bool_values_default [false, true, false] +byte[<=3] byte_values_default [0, 1, 255] +char[<=3] char_values_default [0, 1, 127] +float32[<=3] float32_values_default [1.125, 0.0, -1.125] +float64[<=3] float64_values_default [3.1415, 0.0, -3.1415] +int8[<=3] int8_values_default [0, 127, -128] +uint8[<=3] uint8_values_default [0, 1, 255] +int16[<=3] int16_values_default [0, 32767, -32768] +uint16[<=3] uint16_values_default [0, 1, 65535] +int32[<=3] int32_values_default [0, 2147483647, -2147483648] +uint32[<=3] uint32_values_default [0, 1, 4294967295] +int64[<=3] int64_values_default [0, 9223372036854775807, -9223372036854775808] +uint64[<=3] uint64_values_default [0, 1, 18446744073709551615] +string[<=3] string_values_default ["", "max value", "min value"] +# Regression test: check alignment of basic field after a sequence field is correct +int32 alignment_check diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Constants.msg b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Constants.msg new file mode 100644 index 0000000000000000000000000000000000000000..c996859356721704f6b7415b88400a367e6ca1dd --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Constants.msg @@ -0,0 +1,13 @@ +bool BOOL_CONST=true +byte BYTE_CONST=50 +char CHAR_CONST=100 +float32 FLOAT32_CONST=1.125 +float64 FLOAT64_CONST=1.125 +int8 INT8_CONST=-50 +uint8 UINT8_CONST=200 +int16 INT16_CONST=-1000 +uint16 UINT16_CONST=2000 +int32 INT32_CONST=-30000 +uint32 UINT32_CONST=60000 +int64 INT64_CONST=-40000000 +uint64 UINT64_CONST=50000000 diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Defaults.msg b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Defaults.msg new file mode 100644 index 0000000000000000000000000000000000000000..eb4832f54b09f2662bb71109936da89f4199caf9 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Defaults.msg @@ -0,0 +1,13 @@ +bool bool_value true +byte byte_value 50 +char char_value 100 +float32 float32_value 1.125 +float64 float64_value 1.125 +int8 int8_value -50 +uint8 uint8_value 200 +int16 int16_value -1000 +uint16 uint16_value 2000 +int32 int32_value -30000 +uint32 uint32_value 60000 +int64 int64_value -40000000 +uint64 uint64_value 50000000 diff --git 
a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Empty.msg b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Empty.msg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/MultiNested.msg b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/MultiNested.msg new file mode 100644 index 0000000000000000000000000000000000000000..4cfb10c33821463f1e6421bed60a27828150b75e --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/MultiNested.msg @@ -0,0 +1,10 @@ +# Multiple levels of nested messages +Arrays[3] array_of_arrays +BoundedSequences[3] array_of_bounded_sequences +UnboundedSequences[3] array_of_unbounded_sequences +Arrays[<=3] bounded_sequence_of_arrays +BoundedSequences[<=3] bounded_sequence_of_bounded_sequences +UnboundedSequences[<=3] bounded_sequence_of_unbounded_sequences +Arrays[] unbounded_sequence_of_arrays +BoundedSequences[] unbounded_sequence_of_bounded_sequences +UnboundedSequences[] unbounded_sequence_of_unbounded_sequences diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Nested.msg b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Nested.msg new file mode 100644 index 0000000000000000000000000000000000000000..9adb9a59dbc123d98b9d488f92c1b5fc2160c10f --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Nested.msg @@ -0,0 +1 @@ +BasicTypes basic_types_value diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Strings.msg b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Strings.msg new file mode 100644 index 0000000000000000000000000000000000000000..e7f5e45fb5f55d677f1e97723efdf7d11ccaf188 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/Strings.msg @@ -0,0 +1,13 @@ +string string_value +string string_value_default1 "Hello world!" +string string_value_default2 "Hello'world!" +string string_value_default3 'Hello"world!' +string string_value_default4 'Hello\'world!' +string string_value_default5 "Hello\"world!" +string STRING_CONST="Hello world!" +string<=22 bounded_string_value +string<=22 bounded_string_value_default1 "Hello world!" +string<=22 bounded_string_value_default2 "Hello'world!" +string<=22 bounded_string_value_default3 'Hello"world!' +string<=22 bounded_string_value_default4 'Hello\'world!' +string<=22 bounded_string_value_default5 "Hello\"world!" 
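The fixture messages above cover constants, per-field defaults, fixed-size arrays, and bounded strings; the sequence fixtures that follow cover the `[]` and `[<=N]` forms. On the Python side of the bridge, a record of this shape surfaces as a single-row Arrow struct whose array and sequence members are plain lists. A minimal sketch, reusing the default values from the fixtures above; the exact type inference pyarrow applies to plain Python values is an assumption:

```python
import pyarrow as pa

# Sketch: an Arrays/UnboundedSequences-style record as the bridge's
# single-row pyarrow struct. Fixed-size arrays, bounded sequences and
# unbounded sequences all appear as list values here; the size bounds
# are enforced by the message definition, not by pyarrow.
record = pa.array(
    [
        {
            "int32_values": [0, 2147483647, -2147483648],
            "float64_values": [3.1415, 0.0, -3.1415],
            "string_values": ["", "max value", "min value"],
            "alignment_check": 0,
        }
    ]
)
print(record.type)  # a struct type with list and scalar children
```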
diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/UnboundedSequences.msg b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/UnboundedSequences.msg new file mode 100644 index 0000000000000000000000000000000000000000..36c02d96e815230e81a576cb79252d3ae4d4d84d --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/UnboundedSequences.msg @@ -0,0 +1,34 @@ +# Unbounded sequences of different types +bool[] bool_values +byte[] byte_values +char[] char_values +float32[] float32_values +float64[] float64_values +int8[] int8_values +uint8[] uint8_values +int16[] int16_values +uint16[] uint16_values +int32[] int32_values +uint32[] uint32_values +int64[] int64_values +uint64[] uint64_values +string[] string_values +BasicTypes[] basic_types_values +Constants[] constants_values +Defaults[] defaults_values +bool[] bool_values_default [false, true, false] +byte[] byte_values_default [0, 1, 255] +char[] char_values_default [0, 1, 127] +float32[] float32_values_default [1.125, 0.0, -1.125] +float64[] float64_values_default [3.1415, 0.0, -3.1415] +int8[] int8_values_default [0, 127, -128] +uint8[] uint8_values_default [0, 1, 255] +int16[] int16_values_default [0, 32767, -32768] +uint16[] uint16_values_default [0, 1, 65535] +int32[] int32_values_default [0, 2147483647, -2147483648] +uint32[] uint32_values_default [0, 1, 4294967295] +int64[] int64_values_default [0, 9223372036854775807, -9223372036854775808] +uint64[] uint64_values_default [0, 1, 18446744073709551615] +string[] string_values_default ["", "max value", "min value"] +# Regression test: check alignment of basic field after a sequence field is correct +int32 alignment_check diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/WStrings.msg b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/WStrings.msg new file mode 100644 index 0000000000000000000000000000000000000000..666d0ec67abb333a42dad00f9c17b045c5197b29 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/msg/WStrings.msg @@ -0,0 +1,10 @@ +wstring wstring_value +wstring wstring_value_default1 "Hello world!" +wstring wstring_value_default2 "Hellö wörld!" +wstring wstring_value_default3 "ハローワールド" +#wstring WSTRING_CONST="Hello world!" +#wstring<=22 bounded_wstring_value +#wstring<=22 bounded_wstring_value_default1 "Hello world!" 
+wstring[3] array_of_wstrings +wstring[<=3] bounded_sequence_of_wstrings +wstring[] unbounded_sequence_of_wstrings diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/srv/Arrays.srv b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/srv/Arrays.srv new file mode 100644 index 0000000000000000000000000000000000000000..4c96820ae065cffa1946ea10c01ca50840b7d134 --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/srv/Arrays.srv @@ -0,0 +1,63 @@ +bool[3] bool_values +byte[3] byte_values +char[3] char_values +float32[3] float32_values +float64[3] float64_values +int8[3] int8_values +uint8[3] uint8_values +int16[3] int16_values +uint16[3] uint16_values +int32[3] int32_values +uint32[3] uint32_values +int64[3] int64_values +uint64[3] uint64_values +string[3] string_values +BasicTypes[3] basic_types_values +Constants[3] constants_values +Defaults[3] defaults_values +bool[3] bool_values_default [false, true, false] +byte[3] byte_values_default [0, 1, 255] +char[3] char_values_default [0, 1, 127] +float32[3] float32_values_default [1.125, 0.0, -1.125] +float64[3] float64_values_default [3.1415, 0.0, -3.1415] +int8[3] int8_values_default [0, 127, -128] +uint8[3] uint8_values_default [0, 1, 255] +int16[3] int16_values_default [0, 32767, -32768] +uint16[3] uint16_values_default [0, 1, 65535] +int32[3] int32_values_default [0, 2147483647, -2147483648] +uint32[3] uint32_values_default [0, 1, 4294967295] +int64[3] int64_values_default [0, 9223372036854775807, -9223372036854775808] +uint64[3] uint64_values_default [0, 1, 18446744073709551615] +string[3] string_values_default ["", "max value", "min value"] +--- +bool[3] bool_values +byte[3] byte_values +char[3] char_values +float32[3] float32_values +float64[3] float64_values +int8[3] int8_values +uint8[3] uint8_values +int16[3] int16_values +uint16[3] uint16_values +int32[3] int32_values +uint32[3] uint32_values +int64[3] int64_values +uint64[3] uint64_values +string[3] string_values +BasicTypes[3] basic_types_values +Constants[3] constants_values +Defaults[3] defaults_values +bool[3] bool_values_default [false, true, false] +byte[3] byte_values_default [0, 1, 255] +char[3] char_values_default [0, 1, 127] +float32[3] float32_values_default [1.125, 0.0, -1.125] +float64[3] float64_values_default [3.1415, 0.0, -3.1415] +int8[3] int8_values_default [0, 127, -128] +uint8[3] uint8_values_default [0, 1, 255] +int16[3] int16_values_default [0, 32767, -32768] +uint16[3] uint16_values_default [0, 1, 65535] +int32[3] int32_values_default [0, 2147483647, -2147483648] +uint32[3] uint32_values_default [0, 1, 4294967295] +int64[3] int64_values_default [0, 9223372036854775807, -9223372036854775808] +uint64[3] uint64_values_default [0, 1, 18446744073709551615] +string[3] string_values_default ["", "max value", "min value"] diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/srv/BasicTypes.srv b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/srv/BasicTypes.srv new file mode 100644 index 0000000000000000000000000000000000000000..d00cfd539ac4bc8d90accb3d5c7590200874cd4b --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/srv/BasicTypes.srv @@ -0,0 +1,29 @@ +bool bool_value +byte byte_value +char char_value +float32 float32_value +float64 float64_value +int8 int8_value +uint8 uint8_value +int16 int16_value +uint16 uint16_value +int32 int32_value +uint32 uint32_value +int64 int64_value +uint64 uint64_value +string string_value +--- +bool bool_value +byte byte_value +char char_value +float32 float32_value +float64 
float64_value +int8 int8_value +uint8 uint8_value +int16 int16_value +uint16 uint16_value +int32 int32_value +uint32 uint32_value +int64 int64_value +uint64 uint64_value +string string_value diff --git a/libraries/extensions/ros2-bridge/msg-gen/test_msgs/srv/Empty.srv b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/srv/Empty.srv new file mode 100644 index 0000000000000000000000000000000000000000..ed97d539c095cf1413af30cc23dea272095b97dd --- /dev/null +++ b/libraries/extensions/ros2-bridge/msg-gen/test_msgs/srv/Empty.srv @@ -0,0 +1 @@ +--- diff --git a/libraries/extensions/ros2-bridge/python/Cargo.toml b/libraries/extensions/ros2-bridge/python/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..fd1b8627fc7f491e0c3940c7eec89d460b5b201c --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "dora-ros2-bridge-python" +version = "0.1.0" +edition = "2021" + + +[dependencies] +dora-ros2-bridge = { path = "..", default-features = false } +dora-ros2-bridge-msg-gen = { path = "../msg-gen" } +pyo3 = { workspace = true, features = ["eyre", "abi3-py37", "serde"] } +eyre = "0.6" +serde = "1.0.166" +arrow = { workspace = true, features = ["pyarrow"] } +futures = "0.3.28" + +[dev-dependencies] +serde_assert = "0.7.1" diff --git a/libraries/extensions/ros2-bridge/python/src/lib.rs b/libraries/extensions/ros2-bridge/python/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ee7f683090dbfe28e915778a6a11ee13e650bf72 --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/src/lib.rs @@ -0,0 +1,446 @@ +use std::{ + borrow::Cow, + collections::HashMap, + path::{Path, PathBuf}, + sync::Arc, +}; + +use ::dora_ros2_bridge::{ros2_client, rustdds}; +use arrow::{ + array::{make_array, ArrayData}, + pyarrow::{FromPyArrow, ToPyArrow}, +}; +use dora_ros2_bridge_msg_gen::types::Message; +use eyre::{eyre, Context, ContextCompat, Result}; +use futures::{Stream, StreamExt}; +use pyo3::{ + prelude::{pyclass, pymethods}, + types::{PyAnyMethods, PyDict, PyList, PyModule, PyModuleMethods}, + Bound, PyAny, PyObject, PyResult, Python, +}; +use typed::{deserialize::StructDeserializer, TypeInfo, TypedValue}; + +pub mod qos; +pub mod typed; + +/// ROS2 Context holding all messages definition for receiving and sending messages to ROS2. +/// +/// By default, Ros2Context will use env `AMENT_PREFIX_PATH` to search for message definition. +/// +/// AMENT_PREFIX_PATH folder structure should be the following: +/// +/// - For messages: /msg/.msg +/// - For services: /srv/.srv +/// +/// You can also use `ros_paths` if you don't want to use env variable. +/// +/// warning:: +/// dora Ros2 bridge functionality is considered **unstable**. It may be changed +/// at any point without it being considered a breaking change. 
+/// +/// ```python +/// context = Ros2Context() +/// ``` +/// +/// :type ros_paths: typing.List[str], optional +/// +#[pyclass] +pub struct Ros2Context { + context: ros2_client::Context, + messages: Arc>>, +} + +#[pymethods] +impl Ros2Context { + /// Create a new context + #[new] + pub fn new(ros_paths: Option>) -> eyre::Result { + Python::with_gil(|py| -> Result<()> { + let warnings = py + .import_bound("warnings") + .wrap_err("failed to import `warnings` module")?; + warnings + .call_method1("warn", ("dora-rs ROS2 Bridge is unstable and may change at any point without it being considered a breaking change",)) + .wrap_err("failed to call `warnings.warn` module")?; + Ok(()) + })?; + let ament_prefix_path = std::env::var("AMENT_PREFIX_PATH"); + let empty = String::new(); + + let paths: Vec<_> = match &ros_paths { + Some(paths) => paths.iter().map(|p| p.as_path()).collect(), + None => { + let ament_prefix_path_parsed = match &ament_prefix_path { + Ok(path) => path, + Err(std::env::VarError::NotPresent) => &empty, + Err(std::env::VarError::NotUnicode(s)) => { + eyre::bail!( + "AMENT_PREFIX_PATH is not valid unicode: `{}`", + s.to_string_lossy() + ); + } + }; + + ament_prefix_path_parsed.split(':').map(Path::new).collect() + } + }; + + let packages = dora_ros2_bridge_msg_gen::get_packages(&paths) + .map_err(|err| eyre!(err)) + .context("failed to parse ROS2 message types")?; + + let mut messages = HashMap::new(); + for message in packages.into_iter().flat_map(|p| p.messages.into_iter()) { + let entry: &mut HashMap = + messages.entry(message.package.clone()).or_default(); + entry.insert(message.name.clone(), message); + } + + Ok(Self { + context: ros2_client::Context::new()?, + messages: Arc::new(messages), + }) + } + + /// Create a new ROS2 node + /// + /// ```python + /// ros2_node = ros2_context.new_node( + /// "turtle_teleop", + /// "/ros2_demo", + /// Ros2NodeOptions(rosout=True), + /// ) + /// ``` + /// + /// warning:: + /// dora Ros2 bridge functionality is considered **unstable**. It may be changed + /// at any point without it being considered a breaking change. + /// + /// :type name: str + /// :type namespace: str + /// :type options: dora.Ros2NodeOptions + /// :rtype: dora.Ros2Node + pub fn new_node( + &self, + name: &str, + namespace: &str, + options: Ros2NodeOptions, + ) -> eyre::Result { + let name = ros2_client::NodeName::new(namespace, name) + .map_err(|err| eyre!("invalid node name: {err}"))?; + Ok(Ros2Node { + node: self + .context + .new_node(name, options.into()) + .map_err(|e| eyre::eyre!("failed to create ROS2 node: {e:?}"))?, + messages: self.messages.clone(), + }) + } +} + +/// ROS2 Node +/// +/// warnings:: +/// - dora Ros2 bridge functionality is considered **unstable**. It may be changed +/// at any point without it being considered a breaking change. +/// - There's a known issue about ROS2 nodes not being discoverable by ROS2 +/// See: https://github.com/jhelovuo/ros2-client/issues/4 +/// +#[pyclass] +pub struct Ros2Node { + node: ros2_client::Node, + messages: Arc>>, +} + +#[pymethods] +impl Ros2Node { + /// Create a ROS2 topic to connect to. 
+ /// + /// ```python + /// turtle_twist_topic = ros2_node.create_topic( + /// "/turtle1/cmd_vel", "geometry_msgs/Twist", topic_qos + /// ) + /// ``` + /// + /// :type name: str + /// :type message_type: str + /// :type qos: dora.Ros2QosPolicies + /// :rtype: dora.Ros2Topic + pub fn create_topic( + &self, + name: &str, + message_type: String, + qos: qos::Ros2QosPolicies, + ) -> eyre::Result { + let (namespace_name, message_name) = + match (message_type.split_once('/'), message_type.split_once("::")) { + (Some(msg), None) => msg, + (None, Some(msg)) => msg, + _ => eyre::bail!("Expected message type in the format `namespace/message` or `namespace::message`, such as `std_msgs/UInt8` but got: {}", message_type), + }; + + let message_type_name = ros2_client::MessageTypeName::new(namespace_name, message_name); + let topic_name = ros2_client::Name::parse(name) + .map_err(|err| eyre!("failed to parse ROS2 topic name: {err}"))?; + let topic = self + .node + .create_topic(&topic_name, message_type_name, &qos.into())?; + let type_info = TypeInfo { + package_name: namespace_name.to_owned().into(), + message_name: message_name.to_owned().into(), + messages: self.messages.clone(), + }; + + Ok(Ros2Topic { topic, type_info }) + } + + /// Create a ROS2 publisher + /// + /// ```python + /// pose_publisher = ros2_node.create_publisher(turtle_pose_topic) + /// ``` + /// warnings: + /// - dora Ros2 bridge functionality is considered **unstable**. It may be changed + /// at any point without it being considered a breaking change. + /// + /// :type topic: dora.Ros2Topic + /// :type qos: dora.Ros2QosPolicies, optional + /// :rtype: dora.Ros2Publisher + pub fn create_publisher( + &mut self, + topic: &Ros2Topic, + qos: Option, + ) -> eyre::Result { + let publisher = self + .node + .create_publisher(&topic.topic, qos.map(Into::into))?; + Ok(Ros2Publisher { + publisher, + type_info: topic.type_info.clone(), + }) + } + + /// Create a ROS2 subscription + /// + /// ```python + /// pose_reader = ros2_node.create_subscription(turtle_pose_topic) + /// ``` + /// + /// warnings: + /// - dora Ros2 bridge functionality is considered **unstable**. It may be changed + /// at any point without it being considered a breaking change. + /// + /// :type topic: dora.Ros2Topic + /// :type qos: dora.Ros2QosPolicies, optional + /// :rtype: dora.Ros2Subscription + pub fn create_subscription( + &mut self, + topic: &Ros2Topic, + qos: Option, + ) -> eyre::Result { + let subscription = self + .node + .create_subscription(&topic.topic, qos.map(Into::into))?; + Ok(Ros2Subscription { + subscription: Some(subscription), + deserializer: StructDeserializer::new(Cow::Owned(topic.type_info.clone())), + }) + } +} + +/// ROS2 Node Options +/// :type rosout: bool, optional +/// +#[derive(Debug, Clone, Default)] +#[pyclass] +#[non_exhaustive] +pub struct Ros2NodeOptions { + pub rosout: bool, +} + +#[pymethods] +impl Ros2NodeOptions { + #[new] + pub fn new(rosout: Option) -> Self { + Self { + rosout: rosout.unwrap_or(false), + } + } +} + +impl From for ros2_client::NodeOptions { + fn from(value: Ros2NodeOptions) -> Self { + ros2_client::NodeOptions::new().enable_rosout(value.rosout) + } +} + +/// ROS2 Topic +/// :type rosout: bool, optional +/// +/// warnings: +/// - dora Ros2 bridge functionality is considered **unstable**. It may be changed +/// at any point without it being considered a breaking change. 
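Taken together, the constructors documented above form the usual setup sequence: context, node, topic, then publisher and subscription. A sketch of that flow, assuming the classes are exposed from the top-level `dora` Python package as the docstrings suggest, and that a `geometry_msgs/Twist` definition is discoverable through `AMENT_PREFIX_PATH`; the QoS keyword arguments follow the `Ros2QosPolicies` constructor in `qos.rs` below:

```python
from dora import Ros2Context, Ros2NodeOptions, Ros2QosPolicies

# Assumes AMENT_PREFIX_PATH points at an installed ROS 2 workspace
# that provides geometry_msgs.
context = Ros2Context()
node = context.new_node(
    "turtle_teleop",
    "/ros2_demo",
    Ros2NodeOptions(rosout=True),
)

topic_qos = Ros2QosPolicies(reliable=True, max_blocking_time=0.1)
twist_topic = node.create_topic("/turtle1/cmd_vel", "geometry_msgs/Twist", topic_qos)

twist_writer = node.create_publisher(twist_topic)
twist_reader = node.create_subscription(twist_topic)
```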
+#[pyclass] +#[non_exhaustive] +pub struct Ros2Topic { + topic: rustdds::Topic, + type_info: TypeInfo<'static>, +} + +/// ROS2 Publisher +/// +/// warnings: +/// - dora Ros2 bridge functionality is considered **unstable**. It may be changed +/// at any point without it being considered a breaking change. +#[pyclass] +#[non_exhaustive] +pub struct Ros2Publisher { + publisher: ros2_client::Publisher>, + type_info: TypeInfo<'static>, +} + +#[pymethods] +impl Ros2Publisher { + /// Publish a message into ROS2 topic. + /// + /// Remember that the data format should respect the structure of the ROS2 message using an arrow Structure. + /// + /// ex: + /// ```python + /// gripper_command.publish( + /// pa.array( + /// [ + /// { + /// "name": "gripper", + /// "cmd": np.float32(5), + /// } + /// ] + /// ), + /// ) + /// ``` + /// + /// :type data: pyarrow.Array + /// :rtype: None + /// + pub fn publish(&self, data: Bound<'_, PyAny>) -> eyre::Result<()> { + let pyarrow = PyModule::import_bound(data.py(), "pyarrow")?; + + let data = if data.is_instance_of::() { + // convert to arrow struct scalar + pyarrow.getattr("scalar")?.call1((data,))? + } else { + data + }; + + let data = if data.is_instance(&pyarrow.getattr("StructScalar")?)? { + // convert to arrow array + let list = PyList::new_bound(data.py(), [data]); + pyarrow.getattr("array")?.call1((list,))? + } else { + data + }; + + let value = arrow::array::ArrayData::from_pyarrow_bound(&data)?; + //// add type info to ensure correct serialization (e.g. struct types + //// and map types need to be serialized differently) + let typed_value = TypedValue { + value: &make_array(value), + type_info: &self.type_info, + }; + + self.publisher + .publish(typed_value) + .map_err(|e| e.forget_data()) + .context("publish failed")?; + Ok(()) + } +} + +/// ROS2 Subscription +/// +/// +/// warnings: +/// - dora Ros2 bridge functionality is considered **unstable**. It may be changed +/// at any point without it being considered a breaking change. +#[pyclass] +#[non_exhaustive] +pub struct Ros2Subscription { + deserializer: StructDeserializer<'static>, + subscription: Option>, +} + +#[pymethods] +impl Ros2Subscription { + pub fn next(&self, py: Python) -> eyre::Result> { + let message = self + .subscription + .as_ref() + .context("subscription was already used")? 
+                .take_seed(self.deserializer.clone())
+                .context("failed to take next message from subscription")?;
+        let Some((value, _info)) = message else {
+            return Ok(None);
+        };
+
+        let message = value.to_pyarrow(py)?;
+        // TODO: add `info`
+
+        Ok(Some(message))
+    }
+}
+
+impl Ros2Subscription {
+    pub fn into_stream(&mut self) -> eyre::Result<Ros2SubscriptionStream> {
+        let subscription = self
+            .subscription
+            .take()
+            .context("subscription was already used")?;
+
+        Ok(Ros2SubscriptionStream {
+            deserializer: self.deserializer.clone(),
+            subscription,
+        })
+    }
+}
+
+pub struct Ros2SubscriptionStream {
+    deserializer: StructDeserializer<'static>,
+    subscription: ros2_client::Subscription<ArrayData>,
+}
+
+impl Ros2SubscriptionStream {
+    pub fn as_stream(
+        &self,
+    ) -> impl Stream<Item = Result<(ArrayData, ros2_client::MessageInfo), rustdds::dds::ReadError>> + '_
+    {
+        self.subscription
+            .async_stream_seed(self.deserializer.clone())
+    }
+}
+
+impl Stream for Ros2SubscriptionStream {
+    type Item = Result<(ArrayData, ros2_client::MessageInfo), rustdds::dds::ReadError>;
+
+    fn poll_next(
+        self: std::pin::Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+    ) -> std::task::Poll<Option<Self::Item>> {
+        let s = self.as_stream();
+        futures::pin_mut!(s);
+        s.poll_next_unpin(cx)
+    }
+}
+
+pub fn create_dora_ros2_bridge_module(m: &Bound<'_, PyModule>) -> PyResult<()> {
+    m.add_class::<Ros2Context>()?;
+    m.add_class::<Ros2Node>()?;
+    m.add_class::<Ros2NodeOptions>()?;
+    m.add_class::<Ros2Topic>()?;
+    m.add_class::<Ros2Publisher>()?;
+    m.add_class::<Ros2Subscription>()?;
+    m.add_class::<qos::Ros2QosPolicies>()?;
+    m.add_class::<qos::Ros2Durability>()?;
+    m.add_class::<qos::Ros2Liveliness>()?;
+
+    Ok(())
+}
diff --git a/libraries/extensions/ros2-bridge/python/src/qos.rs b/libraries/extensions/ros2-bridge/python/src/qos.rs
new file mode 100644
index 0000000000000000000000000000000000000000..626934f58c9de14b74dfe9647551949dfa872bb4
--- /dev/null
+++ b/libraries/extensions/ros2-bridge/python/src/qos.rs
@@ -0,0 +1,131 @@
+use ::dora_ros2_bridge::rustdds::{self, policy};
+use pyo3::prelude::{pyclass, pymethods};
+
+/// ROS2 QoS Policy
+///
+/// :type durability: dora.Ros2Durability, optional
+/// :type liveliness: dora.Ros2Liveliness, optional
+/// :type reliable: bool, optional
+/// :type keep_all: bool, optional
+/// :type lease_duration: float, optional
+/// :type max_blocking_time: float, optional
+/// :type keep_last: int, optional
+/// :rtype: dora.Ros2QosPolicies
+///
+#[derive(Debug, Clone)]
+#[pyclass]
+#[non_exhaustive]
+pub struct Ros2QosPolicies {
+    pub durability: Ros2Durability,
+    pub liveliness: Ros2Liveliness,
+    pub lease_duration: f64,
+    pub reliable: bool,
+    pub max_blocking_time: f64,
+    pub keep_all: bool,
+    pub keep_last: i32,
+}
+
+#[pymethods]
+impl Ros2QosPolicies {
+    #[new]
+    pub fn new(
+        durability: Option<Ros2Durability>,
+        liveliness: Option<Ros2Liveliness>,
+        reliable: Option<bool>,
+        keep_all: Option<bool>,
+        lease_duration: Option<f64>,
+        max_blocking_time: Option<f64>,
+        keep_last: Option<i32>,
+    ) -> Self {
+        Self {
+            durability: durability.unwrap_or(Ros2Durability::Volatile),
+            liveliness: liveliness.unwrap_or(Ros2Liveliness::Automatic),
+            lease_duration: lease_duration.unwrap_or(f64::INFINITY),
+            reliable: reliable.unwrap_or(false),
+            max_blocking_time: max_blocking_time.unwrap_or(0.0),
+            keep_all: keep_all.unwrap_or(false),
+            keep_last: keep_last.unwrap_or(1),
+        }
+    }
+}
+
+impl From<Ros2QosPolicies> for rustdds::QosPolicies {
+    fn from(value: Ros2QosPolicies) -> Self {
+        rustdds::QosPolicyBuilder::new()
+            .durability(value.durability.into())
+            .liveliness(value.liveliness.convert(value.lease_duration))
+            .reliability(if value.reliable {
+                policy::Reliability::Reliable {
+                    max_blocking_time: rustdds::Duration::from_frac_seconds(
+                        value.max_blocking_time,
+                    ),
+                }
+            } else {
+                policy::Reliability::BestEffort
+            })
+
.history(if value.keep_all { + policy::History::KeepAll + } else { + policy::History::KeepLast { + depth: value.keep_last, + } + }) + .build() + } +} + +/// DDS 2.2.3.4 DURABILITY +/// +/// :rtype: dora.Ros2Durability +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[pyclass] +pub enum Ros2Durability { + Volatile, + TransientLocal, + Transient, + Persistent, +} + +/// :type value: dora.Ros2Durability +/// :rtype: dora.Ros2Durability +impl From for policy::Durability { + /// :type value: dora.Ros2Durability + /// :rtype: dora.Ros2Durability + fn from(value: Ros2Durability) -> Self { + match value { + Ros2Durability::Volatile => policy::Durability::Volatile, + Ros2Durability::TransientLocal => policy::Durability::TransientLocal, + Ros2Durability::Transient => policy::Durability::Transient, + Ros2Durability::Persistent => policy::Durability::Persistent, + } + } +} + +/// DDS 2.2.3.11 LIVELINESS +/// :rtype: dora.Ros2Liveliness +#[derive(Copy, Clone, Debug, PartialEq)] +#[pyclass] +pub enum Ros2Liveliness { + Automatic, + ManualByParticipant, + ManualByTopic, +} + +impl Ros2Liveliness { + /// :type lease_duration: float + /// :rtype: dora.Ros2Liveliness + fn convert(self, lease_duration: f64) -> policy::Liveliness { + let lease_duration = if lease_duration.is_infinite() { + rustdds::Duration::INFINITE + } else { + rustdds::Duration::from_frac_seconds(lease_duration) + }; + match self { + Ros2Liveliness::Automatic => policy::Liveliness::Automatic { lease_duration }, + Ros2Liveliness::ManualByParticipant => { + policy::Liveliness::ManualByParticipant { lease_duration } + } + Ros2Liveliness::ManualByTopic => policy::Liveliness::ManualByTopic { lease_duration }, + } + } +} diff --git a/libraries/extensions/ros2-bridge/python/src/typed/deserialize/array.rs b/libraries/extensions/ros2-bridge/python/src/typed/deserialize/array.rs new file mode 100644 index 0000000000000000000000000000000000000000..170092dc38cc9578aadc7d00543d3158107a5a35 --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/src/typed/deserialize/array.rs @@ -0,0 +1,28 @@ +use arrow::array::ArrayData; +use dora_ros2_bridge_msg_gen::types::sequences; + +use crate::typed::TypeInfo; + +use super::sequence::SequenceVisitor; + +pub struct ArrayDeserializer<'a> { + pub array_type: &'a sequences::Array, + pub type_info: &'a TypeInfo<'a>, +} + +impl<'de> serde::de::DeserializeSeed<'de> for ArrayDeserializer<'_> { + type Value = ArrayData; + + fn deserialize(self, deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_tuple( + self.array_type.size, + SequenceVisitor { + item_type: &self.array_type.value_type, + type_info: self.type_info, + }, + ) + } +} diff --git a/libraries/extensions/ros2-bridge/python/src/typed/deserialize/mod.rs b/libraries/extensions/ros2-bridge/python/src/typed/deserialize/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..db9249d1d40ccaabb509db977138d06335e2f5d2 --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/src/typed/deserialize/mod.rs @@ -0,0 +1,163 @@ +use super::{TypeInfo, DUMMY_STRUCT_NAME}; +use arrow::{ + array::{make_array, ArrayData, StructArray}, + datatypes::Field, +}; +use core::fmt; +use std::{borrow::Cow, collections::HashMap, fmt::Display, sync::Arc}; + +mod array; +mod primitive; +mod sequence; +mod string; + +#[derive(Debug, Clone)] +pub struct StructDeserializer<'a> { + type_info: Cow<'a, TypeInfo<'a>>, +} + +impl<'a> StructDeserializer<'a> { + pub fn new(type_info: Cow<'a, TypeInfo<'a>>) -> 
Self { + Self { type_info } + } +} + +impl<'de> serde::de::DeserializeSeed<'de> for StructDeserializer<'_> { + type Value = ArrayData; + + fn deserialize(self, deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let empty = HashMap::new(); + let package_messages = self + .type_info + .messages + .get(self.type_info.package_name.as_ref()) + .unwrap_or(&empty); + let message = package_messages + .get(self.type_info.message_name.as_ref()) + .ok_or_else(|| { + error(format!( + "could not find message type {}::{}", + self.type_info.package_name, self.type_info.message_name + )) + })?; + + let visitor = StructVisitor { + type_info: self.type_info.as_ref(), + }; + deserializer.deserialize_tuple_struct(DUMMY_STRUCT_NAME, message.members.len(), visitor) + } +} + +struct StructVisitor<'a> { + type_info: &'a TypeInfo<'a>, +} + +impl<'a, 'de> serde::de::Visitor<'de> for StructVisitor<'a> { + type Value = ArrayData; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a struct encoded as TupleStruct") + } + + fn visit_seq(self, mut data: A) -> Result + where + A: serde::de::SeqAccess<'de>, + { + let empty = HashMap::new(); + let package_messages = self + .type_info + .messages + .get(self.type_info.package_name.as_ref()) + .unwrap_or(&empty); + let message = package_messages + .get(self.type_info.message_name.as_ref()) + .ok_or_else(|| { + error(format!( + "could not find message type {}::{}", + self.type_info.package_name, self.type_info.message_name + )) + })?; + + let mut fields = vec![]; + for member in &message.members { + let value = match &member.r#type { + dora_ros2_bridge_msg_gen::types::MemberType::NestableType(t) => match t { + dora_ros2_bridge_msg_gen::types::primitives::NestableType::BasicType(t) => { + data.next_element_seed(primitive::PrimitiveDeserializer(t))? + } + dora_ros2_bridge_msg_gen::types::primitives::NestableType::NamedType(name) => { + data.next_element_seed(StructDeserializer { + type_info: Cow::Owned(TypeInfo { + package_name: Cow::Borrowed(&self.type_info.package_name), + message_name: Cow::Borrowed(&name.0), + messages: self.type_info.messages.clone(), + }), + })? + } + dora_ros2_bridge_msg_gen::types::primitives::NestableType::NamespacedType( + reference, + ) => { + if reference.namespace != "msg" { + return Err(error(format!( + "struct field {} references non-message type {reference:?}", + member.name + ))); + } + data.next_element_seed(StructDeserializer { + type_info: Cow::Owned(TypeInfo { + package_name: Cow::Borrowed(&reference.package), + message_name: Cow::Borrowed(&reference.name), + messages: self.type_info.messages.clone(), + }), + })? + } + dora_ros2_bridge_msg_gen::types::primitives::NestableType::GenericString(t) => { + match t { + dora_ros2_bridge_msg_gen::types::primitives::GenericString::String | dora_ros2_bridge_msg_gen::types::primitives::GenericString::BoundedString(_)=> { + data.next_element_seed(string::StringDeserializer)? + }, + dora_ros2_bridge_msg_gen::types::primitives::GenericString::WString => todo!("deserialize WString"), + dora_ros2_bridge_msg_gen::types::primitives::GenericString::BoundedWString(_) => todo!("deserialize BoundedWString"), + } + } + }, + dora_ros2_bridge_msg_gen::types::MemberType::Array(a) => { + data.next_element_seed(array::ArrayDeserializer{ array_type : a, type_info: self.type_info})? 
+ }, + dora_ros2_bridge_msg_gen::types::MemberType::Sequence(s) => { + data.next_element_seed(sequence::SequenceDeserializer{item_type: &s.value_type, type_info: self.type_info})? + }, + dora_ros2_bridge_msg_gen::types::MemberType::BoundedSequence(s) => { + data.next_element_seed(sequence::SequenceDeserializer{ item_type: &s.value_type, type_info: self.type_info})? + }, + }; + + let value = value.ok_or_else(|| { + error(format!( + "struct member {} not present in message", + member.name + )) + })?; + + fields.push(( + Arc::new(Field::new(&member.name, value.data_type().clone(), true)), + make_array(value), + )); + } + + let struct_array: StructArray = fields.into(); + + Ok(struct_array.into()) + } +} + +fn error(e: T) -> E +where + T: Display, + E: serde::de::Error, +{ + serde::de::Error::custom(e) +} diff --git a/libraries/extensions/ros2-bridge/python/src/typed/deserialize/primitive.rs b/libraries/extensions/ros2-bridge/python/src/typed/deserialize/primitive.rs new file mode 100644 index 0000000000000000000000000000000000000000..7f13b575f72504adcf9decdf571fd99451ddca64 --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/src/typed/deserialize/primitive.rs @@ -0,0 +1,155 @@ +use arrow::array::{ + ArrayData, BooleanBuilder, Float32Builder, Float64Builder, Int16Builder, Int32Builder, + Int64Builder, Int8Builder, NullArray, UInt16Builder, UInt32Builder, UInt64Builder, + UInt8Builder, +}; +use core::fmt; +use dora_ros2_bridge_msg_gen::types::primitives::BasicType; + +pub struct PrimitiveDeserializer<'a>(pub &'a BasicType); + +impl<'de> serde::de::DeserializeSeed<'de> for PrimitiveDeserializer<'_> { + type Value = ArrayData; + + fn deserialize(self, deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + match self.0 { + BasicType::I8 => deserializer.deserialize_i8(PrimitiveValueVisitor), + BasicType::I16 => deserializer.deserialize_i16(PrimitiveValueVisitor), + BasicType::I32 => deserializer.deserialize_i32(PrimitiveValueVisitor), + BasicType::I64 => deserializer.deserialize_i64(PrimitiveValueVisitor), + BasicType::U8 | BasicType::Char | BasicType::Byte => { + deserializer.deserialize_u8(PrimitiveValueVisitor) + } + BasicType::U16 => deserializer.deserialize_u16(PrimitiveValueVisitor), + BasicType::U32 => deserializer.deserialize_u32(PrimitiveValueVisitor), + BasicType::U64 => deserializer.deserialize_u64(PrimitiveValueVisitor), + BasicType::F32 => deserializer.deserialize_f32(PrimitiveValueVisitor), + BasicType::F64 => deserializer.deserialize_f64(PrimitiveValueVisitor), + BasicType::Bool => deserializer.deserialize_bool(PrimitiveValueVisitor), + } + } +} + +/// Based on https://docs.rs/serde_yaml/0.9.22/src/serde_yaml/value/de.rs.html#14-121 +struct PrimitiveValueVisitor; + +impl<'de> serde::de::Visitor<'de> for PrimitiveValueVisitor { + type Value = ArrayData; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a primitive value") + } + + fn visit_bool(self, b: bool) -> Result + where + E: serde::de::Error, + { + let mut array = BooleanBuilder::new(); + array.append_value(b); + Ok(array.finish().into()) + } + + fn visit_i8(self, u: i8) -> Result + where + E: serde::de::Error, + { + let mut array = Int8Builder::new(); + array.append_value(u); + Ok(array.finish().into()) + } + + fn visit_i16(self, u: i16) -> Result + where + E: serde::de::Error, + { + let mut array = Int16Builder::new(); + array.append_value(u); + Ok(array.finish().into()) + } + fn visit_i32(self, u: i32) -> Result + where + E: serde::de::Error, + { + let mut 
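The struct visitor above assembles each decoded member into a length-one Arrow array and collects them into a single-row `StructArray`, so a received message reaches Python as a one-entry struct whose nested message members are themselves one-row structs. A sketch of that layout for a `geometry_msgs/Twist`-style message; the concrete field set is an assumption taken from the Twist example used earlier:

```python
import pyarrow as pa

# Hypothetical shape of one received Twist-like message after the
# struct visitor has assembled it: a one-row struct whose nested
# message members (Vector3) are themselves one-row structs.
received = pa.array(
    [
        {
            "linear": {"x": 1.0, "y": 0.0, "z": 0.0},
            "angular": {"x": 0.0, "y": 0.0, "z": 0.5},
        }
    ]
)
assert len(received) == 1
print(received[0]["linear"]["x"])  # 1.0
```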
array = Int32Builder::new(); + array.append_value(u); + Ok(array.finish().into()) + } + fn visit_i64(self, i: i64) -> Result + where + E: serde::de::Error, + { + let mut array = Int64Builder::new(); + array.append_value(i); + Ok(array.finish().into()) + } + + fn visit_u8(self, u: u8) -> Result + where + E: serde::de::Error, + { + let mut array = UInt8Builder::new(); + array.append_value(u); + Ok(array.finish().into()) + } + fn visit_u16(self, u: u16) -> Result + where + E: serde::de::Error, + { + let mut array = UInt16Builder::new(); + array.append_value(u); + Ok(array.finish().into()) + } + fn visit_u32(self, u: u32) -> Result + where + E: serde::de::Error, + { + let mut array = UInt32Builder::new(); + array.append_value(u); + Ok(array.finish().into()) + } + fn visit_u64(self, u: u64) -> Result + where + E: serde::de::Error, + { + let mut array = UInt64Builder::new(); + array.append_value(u); + Ok(array.finish().into()) + } + + fn visit_f32(self, f: f32) -> Result + where + E: serde::de::Error, + { + let mut array = Float32Builder::new(); + array.append_value(f); + Ok(array.finish().into()) + } + + fn visit_f64(self, f: f64) -> Result + where + E: serde::de::Error, + { + let mut array = Float64Builder::new(); + array.append_value(f); + Ok(array.finish().into()) + } + + fn visit_unit(self) -> Result + where + E: serde::de::Error, + { + let array = NullArray::new(0); + Ok(array.into()) + } + + fn visit_none(self) -> Result + where + E: serde::de::Error, + { + let array = NullArray::new(0); + Ok(array.into()) + } +} diff --git a/libraries/extensions/ros2-bridge/python/src/typed/deserialize/sequence.rs b/libraries/extensions/ros2-bridge/python/src/typed/deserialize/sequence.rs new file mode 100644 index 0000000000000000000000000000000000000000..a55921968a60d4a54d5dd79b6ec3dcb5c2994062 --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/src/typed/deserialize/sequence.rs @@ -0,0 +1,163 @@ +use arrow::{ + array::{ + Array, ArrayData, BooleanBuilder, ListArray, ListBuilder, PrimitiveBuilder, StringBuilder, + }, + buffer::OffsetBuffer, + datatypes::{self, ArrowPrimitiveType, Field}, +}; +use core::fmt; +use dora_ros2_bridge_msg_gen::types::primitives::{self, BasicType, NestableType}; +use serde::Deserialize; +use std::{borrow::Cow, ops::Deref, sync::Arc}; + +use crate::typed::TypeInfo; + +use super::{error, StructDeserializer}; + +pub struct SequenceDeserializer<'a> { + pub item_type: &'a NestableType, + pub type_info: &'a TypeInfo<'a>, +} + +impl<'de> serde::de::DeserializeSeed<'de> for SequenceDeserializer<'_> { + type Value = ArrayData; + + fn deserialize(self, deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_seq(SequenceVisitor { + item_type: self.item_type, + type_info: self.type_info, + }) + } +} + +pub struct SequenceVisitor<'a> { + pub item_type: &'a NestableType, + pub type_info: &'a TypeInfo<'a>, +} + +impl<'de> serde::de::Visitor<'de> for SequenceVisitor<'_> { + type Value = ArrayData; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a sequence") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: serde::de::SeqAccess<'de>, + { + match &self.item_type { + NestableType::BasicType(t) => match t { + BasicType::I8 => deserialize_primitive_seq::<_, datatypes::Int8Type>(seq), + BasicType::I16 => deserialize_primitive_seq::<_, datatypes::Int16Type>(seq), + BasicType::I32 => deserialize_primitive_seq::<_, datatypes::Int32Type>(seq), + BasicType::I64 => deserialize_primitive_seq::<_, 
datatypes::Int64Type>(seq), + BasicType::U8 | BasicType::Char | BasicType::Byte => { + deserialize_primitive_seq::<_, datatypes::UInt8Type>(seq) + } + BasicType::U16 => deserialize_primitive_seq::<_, datatypes::UInt16Type>(seq), + BasicType::U32 => deserialize_primitive_seq::<_, datatypes::UInt32Type>(seq), + BasicType::U64 => deserialize_primitive_seq::<_, datatypes::UInt64Type>(seq), + BasicType::F32 => deserialize_primitive_seq::<_, datatypes::Float32Type>(seq), + BasicType::F64 => deserialize_primitive_seq::<_, datatypes::Float64Type>(seq), + BasicType::Bool => { + let mut array = BooleanBuilder::new(); + while let Some(value) = seq.next_element()? { + array.append_value(value); + } + // wrap array into list of length 1 + let mut list = ListBuilder::new(array); + list.append(true); + Ok(list.finish().into()) + } + }, + NestableType::NamedType(name) => { + let deserializer = StructDeserializer { + type_info: Cow::Owned(TypeInfo { + package_name: Cow::Borrowed(&self.type_info.package_name), + message_name: Cow::Borrowed(&name.0), + messages: self.type_info.messages.clone(), + }), + }; + deserialize_struct_seq(&mut seq, deserializer) + } + NestableType::NamespacedType(reference) => { + if reference.namespace != "msg" { + return Err(error(format!( + "sequence item references non-message type {reference:?}", + ))); + } + let deserializer = StructDeserializer { + type_info: Cow::Owned(TypeInfo { + package_name: Cow::Borrowed(&reference.package), + message_name: Cow::Borrowed(&reference.name), + messages: self.type_info.messages.clone(), + }), + }; + deserialize_struct_seq(&mut seq, deserializer) + } + NestableType::GenericString(t) => match t { + primitives::GenericString::String | primitives::GenericString::BoundedString(_) => { + let mut array = StringBuilder::new(); + while let Some(value) = seq.next_element::()? { + array.append_value(value); + } + // wrap array into list of length 1 + let mut list = ListBuilder::new(array); + list.append(true); + Ok(list.finish().into()) + } + primitives::GenericString::WString => todo!("deserialize sequence of WString"), + primitives::GenericString::BoundedWString(_) => { + todo!("deserialize sequence of BoundedWString") + } + }, + } + } +} + +fn deserialize_struct_seq<'de, A>( + seq: &mut A, + deserializer: StructDeserializer<'_>, +) -> Result>::Error> +where + A: serde::de::SeqAccess<'de>, +{ + let mut values = Vec::new(); + while let Some(value) = seq.next_element_seed(deserializer.clone())? { + values.push(arrow::array::make_array(value)); + } + let refs: Vec<_> = values.iter().map(|a| a.deref()).collect(); + let concatenated = arrow::compute::concat(&refs).map_err(super::error)?; + + let list = ListArray::try_new( + Arc::new(Field::new("item", concatenated.data_type().clone(), true)), + OffsetBuffer::from_lengths([concatenated.len()]), + Arc::new(concatenated), + None, + ) + .map_err(error)?; + + Ok(list.to_data()) +} + +fn deserialize_primitive_seq<'de, S, T>( + mut seq: S, +) -> Result>::Error> +where + S: serde::de::SeqAccess<'de>, + T: ArrowPrimitiveType, + T::Native: Deserialize<'de>, +{ + let mut array = PrimitiveBuilder::::new(); + while let Some(value) = seq.next_element::()? 
{ + array.append_value(value); + } + // wrap array into list of length 1 + let mut list = ListBuilder::new(array); + list.append(true); + Ok(list.finish().into()) +} diff --git a/libraries/extensions/ros2-bridge/python/src/typed/deserialize/string.rs b/libraries/extensions/ros2-bridge/python/src/typed/deserialize/string.rs new file mode 100644 index 0000000000000000000000000000000000000000..646ea38d3b0ea184d1e6b08978855b11da64eed8 --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/src/typed/deserialize/string.rs @@ -0,0 +1,44 @@ +use arrow::array::{ArrayData, StringBuilder}; +use core::fmt; + +pub struct StringDeserializer; + +impl<'de> serde::de::DeserializeSeed<'de> for StringDeserializer { + type Value = ArrayData; + + fn deserialize(self, deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_str(StringVisitor) + } +} + +/// Based on https://docs.rs/serde_yaml/0.9.22/src/serde_yaml/value/de.rs.html#14-121 +struct StringVisitor; + +impl<'de> serde::de::Visitor<'de> for StringVisitor { + type Value = ArrayData; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a string value") + } + + fn visit_str(self, s: &str) -> Result + where + E: serde::de::Error, + { + let mut array = StringBuilder::new(); + array.append_value(s); + Ok(array.finish().into()) + } + + fn visit_string(self, s: String) -> Result + where + E: serde::de::Error, + { + let mut array = StringBuilder::new(); + array.append_value(s); + Ok(array.finish().into()) + } +} diff --git a/libraries/extensions/ros2-bridge/python/src/typed/mod.rs b/libraries/extensions/ros2-bridge/python/src/typed/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..2b841589ea3ff6ae14ca7cc8da459522d1c77851 --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/src/typed/mod.rs @@ -0,0 +1,122 @@ +use dora_ros2_bridge_msg_gen::types::Message; +use std::{borrow::Cow, collections::HashMap, sync::Arc}; + +pub use serialize::TypedValue; + +pub mod deserialize; +pub mod serialize; + +#[derive(Debug, Clone)] +pub struct TypeInfo<'a> { + pub package_name: Cow<'a, str>, + pub message_name: Cow<'a, str>, + pub messages: Arc>>, +} + +/// Serde requires that struct and field names are known at +/// compile time with a `'static` lifetime, which is not +/// possible in this case. Thus, we need to use dummy names +/// instead. +/// +/// The actual names do not really matter because +/// the CDR format of ROS2 does not encode struct or field +/// names. 
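The `typed` module's round-trip test (below) pulls its fixtures from a `test_utils.TEST_ARRAYS` list of `(package, message, pyarrow array)` tuples, serializes each array through `TypedValue`, deserializes it back, and asks `test_utils.is_subset` to confirm nothing was lost. `test_utils.py` itself is not part of this diff, so the following entry format is only a sketch of what the test appears to expect:

```python
import pyarrow as pa

# Hypothetical test_utils fixtures: one (package, message, array) tuple
# per case, each array holding exactly one struct entry.
TEST_ARRAYS = [
    (
        "test_msgs",
        "BasicTypes",
        pa.array([{"bool_value": True, "int32_value": 42, "float64_value": 1.125}]),
    ),
]

def is_subset(expected, produced) -> bool:
    # Sketch only: the real helper would compare field by field and
    # tolerate members that the bridge fills in with default values.
    ...
```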
+const DUMMY_STRUCT_NAME: &str = "struct"; + +#[cfg(test)] +mod tests { + use std::path::PathBuf; + + use crate::typed::deserialize::StructDeserializer; + use crate::typed::serialize; + use crate::typed::TypeInfo; + use crate::Ros2Context; + + use arrow::array::make_array; + use arrow::pyarrow::FromPyArrow; + use arrow::pyarrow::ToPyArrow; + + use pyo3::types::IntoPyDict; + use pyo3::types::PyAnyMethods; + use pyo3::types::PyDict; + use pyo3::types::PyList; + use pyo3::types::PyModule; + use pyo3::types::PyTuple; + use pyo3::PyNativeType; + use pyo3::Python; + use serde::de::DeserializeSeed; + use serde::Serialize; + + use serde_assert::Serializer; + use serialize::TypedValue; + + use eyre::{Context, Result}; + use serde_assert::Deserializer; + #[test] + fn test_python_array_code() -> Result<()> { + pyo3::prepare_freethreaded_python(); + let context = Ros2Context::new(None).context("Could not create a context")?; + let messages = context.messages.clone(); + let serializer = Serializer::builder().build(); + + Python::with_gil(|py| -> Result<()> { + let path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); //.join("test_utils.py"); // Adjust this path as needed + + // Add the Python module's directory to sys.path + py.run_bound( + "import sys; sys.path.append(str(path))", + Some(&[("path", path)].into_py_dict_bound(py)), + None, + )?; + + let my_module = PyModule::import_bound(py, "test_utils")?; + + let arrays: &PyList = my_module.getattr("TEST_ARRAYS")?.extract()?; + for array_wrapper in arrays.iter() { + let arrays: &PyTuple = array_wrapper.extract()?; + let package_name: String = arrays.get_item(0)?.extract()?; + let message_name: String = arrays.get_item(1)?.extract()?; + println!("Checking {}::{}", package_name, message_name); + let in_pyarrow = arrays.get_item(2)?; + + let array = arrow::array::ArrayData::from_pyarrow_bound(&in_pyarrow.as_borrowed())?; + let type_info = TypeInfo { + package_name: package_name.into(), + message_name: message_name.clone().into(), + messages: messages.clone(), + }; + let typed_value = TypedValue { + value: &make_array(array.clone()), + type_info: &type_info.clone(), + }; + + let typed_deserializer = + StructDeserializer::new(std::borrow::Cow::Owned(type_info)); + let tokens = typed_value.serialize(&serializer)?; + let mut deserializer = Deserializer::builder(tokens).build(); + + let out_value = typed_deserializer + .deserialize(&mut deserializer) + .context("could not deserialize array")?; + + let out_pyarrow = out_value.to_pyarrow(py)?; + + let test_utils = PyModule::import_bound(py, "test_utils")?; + let context = PyDict::new_bound(py); + + context.set_item("test_utils", test_utils)?; + context.set_item("in_pyarrow", in_pyarrow)?; + context.set_item("out_pyarrow", out_pyarrow)?; + + let _ = py + .eval_bound( + "test_utils.is_subset(in_pyarrow, out_pyarrow)", + Some(&context), + None, + ) + .context("could not check if it is a subset")?; + } + Ok(()) + }) + } +} diff --git a/libraries/extensions/ros2-bridge/python/src/typed/serialize/array.rs b/libraries/extensions/ros2-bridge/python/src/typed/serialize/array.rs new file mode 100644 index 0000000000000000000000000000000000000000..3b2bf889a5b1963bc772def4c6ca0d5a7fd94204 --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/src/typed/serialize/array.rs @@ -0,0 +1,259 @@ +use std::{any::type_name, borrow::Cow, marker::PhantomData, sync::Arc}; + +use arrow::{ + array::{Array, ArrayRef, AsArray, OffsetSizeTrait, PrimitiveArray}, + datatypes::{self, ArrowPrimitiveType}, +}; +use 
dora_ros2_bridge_msg_gen::types::{ + primitives::{BasicType, GenericString, NestableType}, + sequences, +}; +use serde::ser::SerializeTuple; + +use crate::typed::TypeInfo; + +use super::{error, TypedValue}; + +/// Serialize an array with known size as tuple. +pub struct ArraySerializeWrapper<'a> { + pub array_info: &'a sequences::Array, + pub column: &'a ArrayRef, + pub type_info: &'a TypeInfo<'a>, +} + +impl serde::Serialize for ArraySerializeWrapper<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let entry = if let Some(list) = self.column.as_list_opt::() { + // should match the length of the outer struct + assert_eq!(list.len(), 1); + list.value(0) + } else { + // try as large list + let list = self + .column + .as_list_opt::() + .ok_or_else(|| error("value is not compatible with expected array type"))?; + // should match the length of the outer struct + assert_eq!(list.len(), 1); + list.value(0) + }; + + match &self.array_info.value_type { + NestableType::BasicType(t) => match t { + BasicType::I8 => BasicArrayAsTuple { + len: self.array_info.size, + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::I16 => BasicArrayAsTuple { + len: self.array_info.size, + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::I32 => BasicArrayAsTuple { + len: self.array_info.size, + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::I64 => BasicArrayAsTuple { + len: self.array_info.size, + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::U8 | BasicType::Char | BasicType::Byte => BasicArrayAsTuple { + len: self.array_info.size, + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::U16 => BasicArrayAsTuple { + len: self.array_info.size, + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::U32 => BasicArrayAsTuple { + len: self.array_info.size, + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::U64 => BasicArrayAsTuple { + len: self.array_info.size, + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::F32 => BasicArrayAsTuple { + len: self.array_info.size, + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::F64 => BasicArrayAsTuple { + len: self.array_info.size, + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::Bool => BoolArrayAsTuple { + len: self.array_info.size, + value: &entry, + } + .serialize(serializer), + }, + NestableType::NamedType(name) => { + let array = entry + .as_struct_opt() + .ok_or_else(|| error("not a struct array"))?; + let mut seq = serializer.serialize_tuple(self.array_info.size)?; + for i in 0..array.len() { + let row = array.slice(i, 1); + seq.serialize_element(&TypedValue { + value: &(Arc::new(row) as ArrayRef), + type_info: &crate::typed::TypeInfo { + package_name: Cow::Borrowed(&self.type_info.package_name), + message_name: Cow::Borrowed(&name.0), + messages: self.type_info.messages.clone(), + }, + })?; + } + seq.end() + } + NestableType::NamespacedType(reference) => { + if reference.namespace != "msg" { + return Err(error(format!( + "sequence references non-message type {reference:?}" + ))); + } + + let array = entry + .as_struct_opt() + .ok_or_else(|| error("not a struct array"))?; + let mut seq = serializer.serialize_tuple(self.array_info.size)?; + for i in 0..array.len() { + let row = array.slice(i, 1); + seq.serialize_element(&TypedValue { + value: 
&(Arc::new(row) as ArrayRef), + type_info: &crate::typed::TypeInfo { + package_name: Cow::Borrowed(&reference.package), + message_name: Cow::Borrowed(&reference.name), + messages: self.type_info.messages.clone(), + }, + })?; + } + seq.end() + } + NestableType::GenericString(s) => match s { + GenericString::String | GenericString::BoundedString(_) => { + match entry.as_string_opt::() { + Some(array) => { + serialize_arrow_string(serializer, array, self.array_info.size) + } + None => { + let array = entry + .as_string_opt::() + .ok_or_else(|| error("expected string array"))?; + serialize_arrow_string(serializer, array, self.array_info.size) + } + } + } + GenericString::WString => { + todo!("serializing WString sequences") + } + GenericString::BoundedWString(_) => todo!("serializing BoundedWString sequences"), + }, + } + } +} + +/// Serializes a primitive array with known size as tuple. +struct BasicArrayAsTuple<'a, T> { + len: usize, + value: &'a ArrayRef, + ty: PhantomData, +} + +impl serde::Serialize for BasicArrayAsTuple<'_, T> +where + T: ArrowPrimitiveType, + T::Native: serde::Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let mut seq = serializer.serialize_tuple(self.len)?; + let array: &PrimitiveArray = self + .value + .as_primitive_opt() + .ok_or_else(|| error(format!("not a primitive {} array", type_name::())))?; + if array.len() != self.len { + return Err(error(format!( + "expected array with length {}, got length {}", + self.len, + array.len() + ))); + } + + for value in array.values() { + seq.serialize_element(value)?; + } + + seq.end() + } +} + +struct BoolArrayAsTuple<'a> { + len: usize, + value: &'a ArrayRef, +} + +impl serde::Serialize for BoolArrayAsTuple<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let mut seq = serializer.serialize_tuple(self.len)?; + let array = self + .value + .as_boolean_opt() + .ok_or_else(|| error("not a boolean array"))?; + if array.len() != self.len { + return Err(error(format!( + "expected array with length {}, got length {}", + self.len, + array.len() + ))); + } + + for value in array.values() { + seq.serialize_element(&value)?; + } + + seq.end() + } +} + +fn serialize_arrow_string( + serializer: S, + array: &arrow::array::GenericByteArray>, + array_len: usize, +) -> Result<::Ok, ::Error> +where + S: serde::Serializer, + O: OffsetSizeTrait, +{ + let mut seq = serializer.serialize_tuple(array_len)?; + for s in array.iter() { + seq.serialize_element(s.unwrap_or_default())?; + } + seq.end() +} diff --git a/libraries/extensions/ros2-bridge/python/src/typed/serialize/defaults.rs b/libraries/extensions/ros2-bridge/python/src/typed/serialize/defaults.rs new file mode 100644 index 0000000000000000000000000000000000000000..2cc1e6b731208f9682480bc18c1391cfc04a176d --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/src/typed/serialize/defaults.rs @@ -0,0 +1,237 @@ +use arrow::{ + array::{ + make_array, Array, ArrayData, BooleanArray, Float32Array, Float64Array, Int16Array, + Int32Array, Int64Array, Int8Array, ListArray, StringArray, StructArray, UInt16Array, + UInt32Array, UInt64Array, UInt8Array, + }, + buffer::{OffsetBuffer, ScalarBuffer}, + compute::concat, + datatypes::Field, +}; +use dora_ros2_bridge_msg_gen::types::{ + primitives::{BasicType, NestableType}, + MemberType, Message, +}; +use eyre::{Context, ContextCompat, Result}; +use std::{collections::HashMap, sync::Arc, vec}; + +pub fn default_for_member( + m: 
&dora_ros2_bridge_msg_gen::types::Member, + package_name: &str, + messages: &HashMap>, +) -> eyre::Result { + let value = match &m.r#type { + MemberType::NestableType(t) => match t { + NestableType::BasicType(_) | NestableType::GenericString(_) => match &m + .default + .as_deref() + { + Some([]) => eyre::bail!("empty default value not supported"), + Some([default]) => preset_default_for_basic_type(t, default) + .with_context(|| format!("failed to parse default value for `{}`", m.name))?, + Some(_) => eyre::bail!( + "there should be only a single default value for non-sequence types" + ), + None => default_for_nestable_type(t, package_name, messages, 1)?, + }, + NestableType::NamedType(_) => { + if m.default.is_some() { + eyre::bail!("default values for nested types are not supported") + } else { + default_for_nestable_type(t, package_name, messages, 1)? + } + } + NestableType::NamespacedType(_) => { + default_for_nestable_type(t, package_name, messages, 1)? + } + }, + MemberType::Array(array) => list_default_values( + m, + &array.value_type, + package_name, + messages, + Some(array.size), + )?, + MemberType::Sequence(seq) => { + list_default_values(m, &seq.value_type, package_name, messages, None)? + } + MemberType::BoundedSequence(seq) => list_default_values( + m, + &seq.value_type, + package_name, + messages, + Some(seq.max_size), + )?, + }; + Ok(value) +} + +fn default_for_nestable_type( + t: &NestableType, + package_name: &str, + messages: &HashMap>, + size: usize, +) -> Result { + let empty = HashMap::new(); + let package_messages = messages.get(package_name).unwrap_or(&empty); + let array = match t { + NestableType::BasicType(t) => match t { + BasicType::I8 => Int8Array::from(vec![0; size]).into(), + BasicType::I16 => Int16Array::from(vec![0; size]).into(), + BasicType::I32 => Int32Array::from(vec![0; size]).into(), + BasicType::I64 => Int64Array::from(vec![0; size]).into(), + BasicType::U8 => UInt8Array::from(vec![0; size]).into(), + BasicType::U16 => UInt16Array::from(vec![0; size]).into(), + BasicType::U32 => UInt32Array::from(vec![0; size]).into(), + BasicType::U64 => UInt64Array::from(vec![0; size]).into(), + BasicType::F32 => Float32Array::from(vec![0.; size]).into(), + BasicType::F64 => Float64Array::from(vec![0.; size]).into(), + BasicType::Char => StringArray::from(vec![""]).into(), + BasicType::Byte => UInt8Array::from(vec![0u8; size]).into(), + BasicType::Bool => BooleanArray::from(vec![false; size]).into(), + }, + NestableType::GenericString(_) => StringArray::from(vec![""]).into(), + NestableType::NamedType(name) => { + let referenced_message = package_messages + .get(&name.0) + .context("unknown referenced message")?; + + default_for_referenced_message(referenced_message, package_name, messages)? + } + NestableType::NamespacedType(t) => { + let referenced_package_messages = messages.get(&t.package).unwrap_or(&empty); + let referenced_message = referenced_package_messages + .get(&t.name) + .context("unknown referenced message")?; + default_for_referenced_message(referenced_message, &t.package, messages)? 
+ } + }; + Ok(array) +} + +fn preset_default_for_basic_type(t: &NestableType, preset: &str) -> Result { + Ok(match t { + NestableType::BasicType(t) => match t { + BasicType::I8 => Int8Array::from(vec![preset + .parse::() + .context("Could not parse preset default value")?]) + .into(), + BasicType::I16 => Int16Array::from(vec![preset + .parse::() + .context("Could not parse preset default value")?]) + .into(), + BasicType::I32 => Int32Array::from(vec![preset + .parse::() + .context("Could not parse preset default value")?]) + .into(), + BasicType::I64 => Int64Array::from(vec![preset + .parse::() + .context("Could not parse preset default value")?]) + .into(), + BasicType::U8 => UInt8Array::from(vec![preset + .parse::() + .context("Could not parse preset default value")?]) + .into(), + BasicType::U16 => UInt16Array::from(vec![preset + .parse::() + .context("Could not parse preset default value")?]) + .into(), + BasicType::U32 => UInt32Array::from(vec![preset + .parse::() + .context("Could not parse preset default value")?]) + .into(), + BasicType::U64 => UInt64Array::from(vec![preset + .parse::() + .context("Could not parse preset default value")?]) + .into(), + BasicType::F32 => Float32Array::from(vec![preset + .parse::() + .context("Could not parse preset default value")?]) + .into(), + BasicType::F64 => Float64Array::from(vec![preset + .parse::() + .context("Could not parse preset default value")?]) + .into(), + BasicType::Char => StringArray::from(vec![preset]).into(), + BasicType::Byte => UInt8Array::from(preset.as_bytes().to_owned()).into(), + BasicType::Bool => BooleanArray::from(vec![preset + .parse::() + .context("could not parse preset default value")?]) + .into(), + }, + NestableType::GenericString(_) => StringArray::from(vec![preset]).into(), + _ => todo!("preset_default_for_basic_type (other)"), + }) +} + +fn default_for_referenced_message( + referenced_message: &Message, + package_name: &str, + messages: &HashMap>, +) -> eyre::Result { + let fields: Vec<(Arc, Arc)> = referenced_message + .members + .iter() + .map(|m| { + let default = default_for_member(m, package_name, messages)?; + Result::<_, eyre::Report>::Ok(( + Arc::new(Field::new( + m.name.clone(), + default.data_type().clone(), + true, + )), + make_array(default), + )) + }) + .collect::>()?; + + let struct_array: StructArray = fields.into(); + Ok(struct_array.into()) +} + +fn list_default_values( + m: &dora_ros2_bridge_msg_gen::types::Member, + value_type: &NestableType, + package_name: &str, + messages: &HashMap>, + size: Option, +) -> Result { + let defaults = match &m.default.as_deref() { + Some([]) => eyre::bail!("empty default value not supported"), + Some(defaults) => { + let raw_array: Vec> = defaults + .iter() + .map(|default| { + preset_default_for_basic_type(value_type, default) + .with_context(|| format!("failed to parse default value for `{}`", m.name)) + .map(make_array) + }) + .collect::>()?; + let default_values = concat( + raw_array + .iter() + .map(|data| data.as_ref()) + .collect::>() + .as_slice(), + ) + .context("Failed to concatenate default list value")?; + default_values.to_data() + } + None => { + let size = size.unwrap_or(1); + let default_nested_type = + default_for_nestable_type(value_type, package_name, messages, size)?; + let offsets = OffsetBuffer::new(ScalarBuffer::from(vec![0, size as i32])); + + let field = Arc::new(Field::new( + "item", + default_nested_type.data_type().clone(), + true, + )); + let list = ListArray::new(field, offsets, make_array(default_nested_type), None); + 
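+                // The offsets built above are `[0, size]`, i.e. a single list entry that
+                // spans all `size` default items (matching the one-row outer struct).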
list.to_data() + } + }; + + Ok(defaults) +} diff --git a/libraries/extensions/ros2-bridge/python/src/typed/serialize/mod.rs b/libraries/extensions/ros2-bridge/python/src/typed/serialize/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..8420f14f0996f1842614bcfdaa3b388590ae9c34 --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/src/typed/serialize/mod.rs @@ -0,0 +1,205 @@ +use std::{borrow::Cow, collections::HashMap, fmt::Display}; + +use arrow::{ + array::{Array, ArrayRef, AsArray}, + error, +}; +use dora_ros2_bridge_msg_gen::types::{ + primitives::{GenericString, NestableType}, + MemberType, +}; +use eyre::Context; +use serde::ser::SerializeTupleStruct; + +use super::{TypeInfo, DUMMY_STRUCT_NAME}; + +mod array; +mod defaults; +mod primitive; +mod sequence; + +#[derive(Debug, Clone)] +pub struct TypedValue<'a> { + pub value: &'a ArrayRef, + pub type_info: &'a TypeInfo<'a>, +} + +impl serde::Serialize for TypedValue<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let empty = HashMap::new(); + let package_messages = self + .type_info + .messages + .get(self.type_info.package_name.as_ref()) + .unwrap_or(&empty); + let message = package_messages + .get(self.type_info.message_name.as_ref()) + .ok_or_else(|| { + error(format!( + "could not find message type {}::{}", + self.type_info.package_name, self.type_info.message_name + )) + })?; + + let input = self.value.as_struct_opt().ok_or_else(|| { + error(format!( + "expected struct array for message: {}, with following format: {:#?} \n But, got type: {:#?}", + self.type_info.message_name, message, self.value.data_type() + )) + })?; + for column_name in input.column_names() { + if !message.members.iter().any(|m| m.name == column_name) { + return Err(error(format!( + "given struct has unknown field {column_name}" + )))?; + } + } + if input.is_empty() { + // TODO: publish default value + return Err(error("given struct is empty"))?; + } + if input.len() > 1 { + return Err(error(format!( + "expected single struct instance, got struct array with {} entries", + input.len() + )))?; + } + let mut s = serializer.serialize_tuple_struct(DUMMY_STRUCT_NAME, message.members.len())?; + for field in message.members.iter() { + let column: Cow<_> = match input.column_by_name(&field.name) { + Some(input) => Cow::Borrowed(input), + None => { + let default = defaults::default_for_member( + field, + &self.type_info.package_name, + &self.type_info.messages, + ) + .with_context(|| { + format!( + "failed to calculate default value for field {}.{}", + message.name, field.name + ) + }) + .map_err(|e| error(format!("{e:?}")))?; + Cow::Owned(arrow::array::make_array(default)) + } + }; + + self.serialize_field::(field, column, &mut s) + .map_err(|e| { + error(format!( + "failed to serialize field {}.{}: {e}", + message.name, field.name + )) + })?; + } + s.end() + } +} + +impl<'a> TypedValue<'a> { + fn serialize_field( + &self, + field: &dora_ros2_bridge_msg_gen::types::Member, + column: Cow<'_, std::sync::Arc>, + s: &mut S::SerializeTupleStruct, + ) -> Result<(), S::Error> + where + S: serde::Serializer, + { + match &field.r#type { + MemberType::NestableType(t) => match t { + NestableType::BasicType(t) => { + s.serialize_field(&primitive::SerializeWrapper { + t, + column: column.as_ref(), + })?; + } + NestableType::NamedType(name) => { + let referenced_value = &TypedValue { + value: column.as_ref(), + type_info: &TypeInfo { + package_name: Cow::Borrowed(&self.type_info.package_name), + message_name: 
Cow::Borrowed(&name.0), + messages: self.type_info.messages.clone(), + }, + }; + s.serialize_field(&referenced_value)?; + } + NestableType::NamespacedType(reference) => { + if reference.namespace != "msg" { + return Err(error(format!( + "struct field {} references non-message type {reference:?}", + field.name + ))); + } + + let referenced_value: &TypedValue<'_> = &TypedValue { + value: column.as_ref(), + type_info: &TypeInfo { + package_name: Cow::Borrowed(&reference.package), + message_name: Cow::Borrowed(&reference.name), + messages: self.type_info.messages.clone(), + }, + }; + s.serialize_field(&referenced_value)?; + } + NestableType::GenericString(t) => match t { + GenericString::String | GenericString::BoundedString(_) => { + let string = if let Some(string_array) = column.as_string_opt::() { + // should match the length of the outer struct array + assert_eq!(string_array.len(), 1); + string_array.value(0) + } else { + // try again with large offset type + let string_array = column + .as_string_opt::() + .ok_or_else(|| error("expected string array"))?; + // should match the length of the outer struct array + assert_eq!(string_array.len(), 1); + string_array.value(0) + }; + s.serialize_field(string)?; + } + GenericString::WString => todo!("serializing WString types"), + GenericString::BoundedWString(_) => { + todo!("serializing BoundedWString types") + } + }, + }, + dora_ros2_bridge_msg_gen::types::MemberType::Array(a) => { + s.serialize_field(&array::ArraySerializeWrapper { + array_info: a, + column: column.as_ref(), + type_info: self.type_info, + })?; + } + dora_ros2_bridge_msg_gen::types::MemberType::Sequence(v) => { + s.serialize_field(&sequence::SequenceSerializeWrapper { + item_type: &v.value_type, + column: column.as_ref(), + type_info: self.type_info, + })?; + } + dora_ros2_bridge_msg_gen::types::MemberType::BoundedSequence(v) => { + s.serialize_field(&sequence::SequenceSerializeWrapper { + item_type: &v.value_type, + column: column.as_ref(), + type_info: self.type_info, + })?; + } + } + Ok(()) + } +} + +fn error(e: T) -> E +where + T: Display, + E: serde::ser::Error, +{ + serde::ser::Error::custom(e) +} diff --git a/libraries/extensions/ros2-bridge/python/src/typed/serialize/primitive.rs b/libraries/extensions/ros2-bridge/python/src/typed/serialize/primitive.rs new file mode 100644 index 0000000000000000000000000000000000000000..a13bf444dff597ce2943c12c299775f9ac1a2253 --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/src/typed/serialize/primitive.rs @@ -0,0 +1,79 @@ +use arrow::{ + array::{ArrayRef, AsArray}, + datatypes::{self, ArrowPrimitiveType}, +}; +use dora_ros2_bridge_msg_gen::types::primitives::BasicType; + +pub struct SerializeWrapper<'a> { + pub t: &'a BasicType, + pub column: &'a ArrayRef, +} + +impl serde::Serialize for SerializeWrapper<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + match self.t { + BasicType::I8 => { + serializer.serialize_i8(as_single_primitive::(self.column)?) 
+ } + BasicType::I16 => serializer + .serialize_i16(as_single_primitive::<datatypes::Int16Type, _>(self.column)?), + BasicType::I32 => serializer + .serialize_i32(as_single_primitive::<datatypes::Int32Type, _>(self.column)?), + BasicType::I64 => serializer + .serialize_i64(as_single_primitive::<datatypes::Int64Type, _>(self.column)?), + BasicType::U8 | BasicType::Char | BasicType::Byte => serializer + .serialize_u8(as_single_primitive::<datatypes::UInt8Type, _>(self.column)?), + BasicType::U16 => serializer + .serialize_u16(as_single_primitive::<datatypes::UInt16Type, _>( + self.column, + )?), + BasicType::U32 => serializer + .serialize_u32(as_single_primitive::<datatypes::UInt32Type, _>( + self.column, + )?), + BasicType::U64 => serializer + .serialize_u64(as_single_primitive::<datatypes::UInt64Type, _>( + self.column, + )?), + BasicType::F32 => serializer + .serialize_f32(as_single_primitive::<datatypes::Float32Type, _>( + self.column, + )?), + BasicType::F64 => serializer + .serialize_f64(as_single_primitive::<datatypes::Float64Type, _>( + self.column, + )?), + BasicType::Bool => { + let array = self.column.as_boolean_opt().ok_or_else(|| { + serde::ser::Error::custom( + "value is not compatible with expected `BooleanArray` type", + ) + })?; + // should match the length of the outer struct + assert_eq!(array.len(), 1); + let field_value = array.value(0); + serializer.serialize_bool(field_value) + } + } + } +} + +fn as_single_primitive<T, E>(column: &ArrayRef) -> Result<T::Native, E> +where + T: ArrowPrimitiveType, + E: serde::ser::Error, +{ + let array: &arrow::array::PrimitiveArray<T> = column.as_primitive_opt().ok_or_else(|| { + serde::ser::Error::custom(format!( + "value is not compatible with expected `{}` type", + std::any::type_name::<T>() + )) + })?; + // should match the length of the outer struct + assert_eq!(array.len(), 1); + let number = array.value(0); + Ok(number) +} diff --git a/libraries/extensions/ros2-bridge/python/src/typed/serialize/sequence.rs b/libraries/extensions/ros2-bridge/python/src/typed/serialize/sequence.rs new file mode 100644 index 0000000000000000000000000000000000000000..d42d45fb62d9af1450f035d1105efbb3f23d2eb1 --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/src/typed/serialize/sequence.rs @@ -0,0 +1,268 @@ +use std::{any::type_name, borrow::Cow, marker::PhantomData, sync::Arc}; + +use arrow::{ + array::{Array, ArrayRef, AsArray, OffsetSizeTrait, PrimitiveArray}, + datatypes::{self, ArrowPrimitiveType, UInt8Type}, +}; +use dora_ros2_bridge_msg_gen::types::primitives::{BasicType, GenericString, NestableType}; +use serde::ser::{SerializeSeq, SerializeTuple}; + +use crate::typed::TypeInfo; + +use super::{error, TypedValue}; + +/// Serialize a variable-sized sequence.
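+///
+/// The wrapped Arrow column is expected to contain a single entry (one row of the outer
+/// struct); list, large-list, and binary columns are accepted, and the entry's items are
+/// then serialized element by element.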
+pub struct SequenceSerializeWrapper<'a> { + pub item_type: &'a NestableType, + pub column: &'a ArrayRef, + pub type_info: &'a TypeInfo<'a>, +} + +impl serde::Serialize for SequenceSerializeWrapper<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let entry = if let Some(list) = self.column.as_list_opt::() { + // should match the length of the outer struct + assert_eq!(list.len(), 1); + list.value(0) + } else if let Some(list) = self.column.as_list_opt::() { + // should match the length of the outer struct + assert_eq!(list.len(), 1); + list.value(0) + } else if let Some(list) = self.column.as_binary_opt::() { + // should match the length of the outer struct + assert_eq!(list.len(), 1); + Arc::new(list.slice(0, 1)) as ArrayRef + } else if let Some(list) = self.column.as_binary_opt::() { + // should match the length of the outer struct + assert_eq!(list.len(), 1); + Arc::new(list.slice(0, 1)) as ArrayRef + } else { + return Err(error(format!( + "value is not compatible with expected sequence type: {:?}", + self.column + ))); + }; + match &self.item_type { + NestableType::BasicType(t) => match t { + BasicType::I8 => BasicSequence { + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::I16 => BasicSequence { + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::I32 => BasicSequence { + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::I64 => BasicSequence { + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::U8 | BasicType::Char | BasicType::Byte => { + ByteSequence { value: &entry }.serialize(serializer) + } + BasicType::U16 => BasicSequence { + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::U32 => BasicSequence { + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::U64 => BasicSequence { + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::F32 => BasicSequence { + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::F64 => BasicSequence { + value: &entry, + ty: PhantomData::, + } + .serialize(serializer), + BasicType::Bool => BoolArray { value: &entry }.serialize(serializer), + }, + NestableType::NamedType(name) => { + let array = entry + .as_struct_opt() + .ok_or_else(|| error("not a struct array"))?; + let mut seq = serializer.serialize_seq(Some(array.len()))?; + for i in 0..array.len() { + let row = array.slice(i, 1); + seq.serialize_element(&TypedValue { + value: &(Arc::new(row) as ArrayRef), + type_info: &crate::typed::TypeInfo { + package_name: Cow::Borrowed(&self.type_info.package_name), + message_name: Cow::Borrowed(&name.0), + messages: self.type_info.messages.clone(), + }, + })?; + } + seq.end() + } + NestableType::NamespacedType(reference) => { + if reference.namespace != "msg" { + return Err(error(format!( + "sequence references non-message type {reference:?}" + ))); + } + + let array = entry + .as_struct_opt() + .ok_or_else(|| error("not a struct array"))?; + let mut seq = serializer.serialize_seq(Some(array.len()))?; + for i in 0..array.len() { + let row = array.slice(i, 1); + seq.serialize_element(&TypedValue { + value: &(Arc::new(row) as ArrayRef), + type_info: &crate::typed::TypeInfo { + package_name: Cow::Borrowed(&reference.package), + message_name: Cow::Borrowed(&reference.name), + messages: self.type_info.messages.clone(), + }, + })?; + } + seq.end() + } + NestableType::GenericString(s) => match s { + 
GenericString::String | GenericString::BoundedString(_) => { + match entry.as_string_opt::() { + Some(array) => serialize_arrow_string(serializer, array), + None => { + let array = entry + .as_string_opt::() + .ok_or_else(|| error("expected string array"))?; + serialize_arrow_string(serializer, array) + } + } + } + GenericString::WString => { + todo!("serializing WString sequences") + } + GenericString::BoundedWString(_) => todo!("serializing BoundedWString sequences"), + }, + } + } +} + +fn serialize_arrow_string( + serializer: S, + array: &arrow::array::GenericByteArray>, +) -> Result<::Ok, ::Error> +where + S: serde::Serializer, + O: OffsetSizeTrait, +{ + let mut seq = serializer.serialize_seq(Some(array.len()))?; + for s in array.iter() { + seq.serialize_element(s.unwrap_or_default())?; + } + seq.end() +} + +struct BasicSequence<'a, T> { + value: &'a ArrayRef, + ty: PhantomData, +} + +impl serde::Serialize for BasicSequence<'_, T> +where + T: ArrowPrimitiveType, + T::Native: serde::Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let array: &PrimitiveArray = self + .value + .as_primitive_opt() + .ok_or_else(|| error(format!("not a primitive {} array", type_name::())))?; + + let mut seq = serializer.serialize_seq(Some(array.len()))?; + + for value in array.values() { + seq.serialize_element(value)?; + } + + seq.end() + } +} + +struct ByteSequence<'a> { + value: &'a ArrayRef, +} + +impl serde::Serialize for ByteSequence<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + if let Some(binary) = self.value.as_binary_opt::() { + serialize_binary(serializer, binary) + } else if let Some(binary) = self.value.as_binary_opt::() { + serialize_binary(serializer, binary) + } else { + BasicSequence { + value: self.value, + ty: PhantomData::, + } + .serialize(serializer) + } + } +} + +fn serialize_binary( + serializer: S, + binary: &arrow::array::GenericByteArray>, +) -> Result<::Ok, ::Error> +where + S: serde::Serializer, + O: OffsetSizeTrait, +{ + let mut seq = serializer.serialize_seq(Some(binary.len()))?; + + for value in binary.iter() { + seq.serialize_element(value.unwrap_or_default())?; + } + + seq.end() +} + +struct BoolArray<'a> { + value: &'a ArrayRef, +} + +impl serde::Serialize for BoolArray<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let array = self + .value + .as_boolean_opt() + .ok_or_else(|| error("not a boolean array"))?; + let mut seq = serializer.serialize_tuple(array.len())?; + + for value in array.values() { + seq.serialize_element(&value)?; + } + + seq.end() + } +} diff --git a/libraries/extensions/ros2-bridge/python/test_utils.py b/libraries/extensions/ros2-bridge/python/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a0dd5e6f7fa81ec3e398cbc140e968c6bed53b16 --- /dev/null +++ b/libraries/extensions/ros2-bridge/python/test_utils.py @@ -0,0 +1,284 @@ +import numpy as np +import pyarrow as pa + + +# Marker Message Example +TEST_ARRAYS = [ + ("std_msgs", "UInt8", pa.array([{"data": np.uint8(2)}])), + ( + "std_msgs", + "String", + pa.array([{"data": "hello"}]), + ), + ( + "std_msgs", + "UInt8MultiArray", + pa.array( + [ + { + "data": np.array([1, 2, 3, 4], np.uint8), + "layout": { + "dim": [ + { + "label": "a", + "size": np.uint32(10), + "stride": np.uint32(20), + } + ], + "data_offset": np.uint32(30), + }, + } + ] + ), + ), + ( + "std_msgs", + "Float32MultiArray", + pa.array( + [ + { + "data": np.array([1, 2, 3, 
4], np.float32), + "layout": { + "dim": [ + { + "label": "a", + "size": np.uint32(10), + "stride": np.uint32(20), + } + ], + "data_offset": np.uint32(30), + }, + } + ] + ), + ), + ( + "visualization_msgs", + "Marker", + pa.array( + [ + { + "header": { + "frame_id": "world", # Placeholder value (String type, no numpy equivalent) + }, + "ns": "my_namespace", # Placeholder value (String type, no numpy equivalent) + "id": np.int32(1), # Numpy type + "type": np.int32(0), # Numpy type (ARROW) + "action": np.int32(0), # Numpy type (ADD) + "lifetime": { + "sec": np.int32(1), + "nanosec": np.uint32(2), + }, # Numpy type + "pose": { + "position": { + "x": np.float64(1.0), # Numpy type + "y": np.float64(2.0), # Numpy type + "z": np.float64(3.0), # Numpy type + }, + "orientation": { + "x": np.float64(0.0), # Numpy type + "y": np.float64(0.0), # Numpy type + "z": np.float64(0.0), # Numpy type + "w": np.float64(1.0), # Numpy type + }, + }, + "scale": { + "x": np.float64(1.0), # Numpy type + "y": np.float64(1.0), # Numpy type + "z": np.float64(1.0), # Numpy type + }, + "color": { + "r": np.float32(1.0), # Numpy type + "g": np.float32(0.0), # Numpy type + "b": np.float32(0.0), # Numpy type + "a": np.float32(1.0), # Numpy type (alpha) + }, + "frame_locked": False, # Boolean type, no numpy equivalent + "points": [ # Numpy array for points + { + "x": np.float64(1.0), # Numpy type + "y": np.float64(1.0), # Numpy type + "z": np.float64(1.0), # Numpy type + } + ], + "colors": [ + { + "r": np.float32(1.0), # Numpy type + "g": np.float32(1.0), # Numpy type + "b": np.float32(1.0), # Numpy type + "a": np.float32(1.0), # Numpy type (alpha) + } # Numpy array for colors + ], + "texture_resource": "", + "uv_coordinates": [{}], + "text": "", + "mesh_resource": "", + "mesh_use_embedded_materials": False, # Boolean type, no numpy equivalent + } + ] + ), + ), + ( + "visualization_msgs", + "MarkerArray", + pa.array( + [ + { + "markers": [ + { + "header": { + "frame_id": "world", # Placeholder value (String type, no numpy equivalent) + }, + "ns": "my_namespace", # Placeholder value (String type, no numpy equivalent) + "id": np.int32(1), # Numpy type + "type": np.int32(0), # Numpy type (ARROW) + "action": np.int32(0), # Numpy type (ADD) + "lifetime": { + "sec": np.int32(1), + "nanosec": np.uint32(2), + }, # Numpy type + "pose": { + "position": { + "x": np.float64(1.0), # Numpy type + "y": np.float64(2.0), # Numpy type + "z": np.float64(3.0), # Numpy type + }, + "orientation": { + "x": np.float64(0.0), # Numpy type + "y": np.float64(0.0), # Numpy type + "z": np.float64(0.0), # Numpy type + "w": np.float64(1.0), # Numpy type + }, + }, + "scale": { + "x": np.float64(1.0), # Numpy type + "y": np.float64(1.0), # Numpy type + "z": np.float64(1.0), # Numpy type + }, + "color": { + "r": np.float32(1.0), # Numpy type + "g": np.float32(0.0), # Numpy type + "b": np.float32(0.0), # Numpy type + "a": np.float32(1.0), # Numpy type (alpha) + }, + "frame_locked": False, # Boolean type, no numpy equivalent + "points": [ # Numpy array for points + { + "x": np.float64(1.0), # Numpy type + "y": np.float64(1.0), # Numpy type + "z": np.float64(1.0), # Numpy type + } + ], + "colors": [ + { + "r": np.float32(1.0), # Numpy type + "g": np.float32(1.0), # Numpy type + "b": np.float32(1.0), # Numpy type + "a": np.float32(1.0), # Numpy type (alpha) + } # Numpy array for colors + ], + "texture_resource": "", + "uv_coordinates": [{}], + "text": "", + "mesh_resource": "", + "mesh_use_embedded_materials": False, # Boolean type, no numpy equivalent + } + ] + 
} + ] + ), + ), + ( + "visualization_msgs", + "ImageMarker", + pa.array( + [ + { + "header": { + "stamp": { + "sec": np.int32(123456), # 32-bit integer + "nanosec": np.uint32(789), # 32-bit unsigned integer + }, + "frame_id": "frame_example", + }, + "ns": "namespace", + "id": np.int32(1), # 32-bit integer + "type": np.int32(0), # 32-bit integer, e.g., CIRCLE = 0 + "action": np.int32(0), # 32-bit integer, e.g., ADD = 0 + "position": { + "x": np.float64(1.0), # 32-bit float + "y": np.float64(2.0), # 32-bit float + "z": np.float64(3.0), # 32-bit float + }, + "scale": np.float32(1.0), # 32-bit float + "outline_color": { + "r": np.float32(255.0), # 32-bit float + "g": np.float32(0.0), # 32-bit float + "b": np.float32(0.0), # 32-bit float + "a": np.float32(1.0), # 32-bit float + }, + "filled": np.uint8(1), # 8-bit unsigned integer + "fill_color": { + "r": np.float32(0.0), # 32-bit float + "g": np.float32(255.0), # 32-bit float + "b": np.float32(0.0), # 32-bit float + "a": np.float32(1.0), # 32-bit float + }, + "lifetime": { + "sec": np.int32(300), # 32-bit integer + "nanosec": np.uint32(0), # 32-bit unsigned integer + }, + "points": [ + { + "x": np.float64(1.0), # 32-bit float + "y": np.float64(2.0), # 32-bit float + "z": np.float64(3.0), # 32-bit float + }, + { + "x": np.float64(4.0), # 32-bit float + "y": np.float64(5.0), # 32-bit float + "z": np.float64(6.0), # 32-bit float + }, + ], + "outline_colors": [ + { + "r": np.float32(255.0), # 32-bit float + "g": np.float32(0.0), # 32-bit float + "b": np.float32(0.0), # 32-bit float + "a": np.float32(1.0), # 32-bit float + }, + { + "r": np.float32(0.0), # 32-bit float + "g": np.float32(255.0), # 32-bit float + "b": np.float32(0.0), # 32-bit float + "a": np.float32(1.0), # 32-bit float + }, + ], + } + ] + ), + ), +] + + +def is_subset(subset, superset): + """ + Check if subset is a subset of superset, to avoid false negatives linked to default values. 
+ """ + if isinstance(subset, pa.Array): + return is_subset(subset.to_pylist(), superset.to_pylist()) + + match subset: + case dict(_): + return all( + key in superset and is_subset(val, superset[key]) + for key, val in subset.items() + ) + case list(_) | set(_): + return all( + any(is_subset(subitem, superitem) for superitem in superset) + for subitem in subset + ) + # assume that subset is a plain value if none of the above match + case _: + return subset == superset diff --git a/libraries/extensions/ros2-bridge/src/_core/mod.rs b/libraries/extensions/ros2-bridge/src/_core/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..31b88dce3ec1f1cb0af73e73717c4897842ee84e --- /dev/null +++ b/libraries/extensions/ros2-bridge/src/_core/mod.rs @@ -0,0 +1,9 @@ +pub use widestring; + +pub mod sequence; +pub mod string; +pub mod traits; + +pub use sequence::{FFISeq, OwnedFFISeq, RefFFISeq}; +pub use string::{FFIString, FFIWString, OwnedFFIString, OwnedFFIWString}; +pub use traits::{ActionT, FFIFromRust, FFIToRust, InternalDefault, MessageT}; diff --git a/libraries/extensions/ros2-bridge/src/_core/sequence.rs b/libraries/extensions/ros2-bridge/src/_core/sequence.rs new file mode 100644 index 0000000000000000000000000000000000000000..7a9411c817c5970c0afd04a844fa69da4ccd3a40 --- /dev/null +++ b/libraries/extensions/ros2-bridge/src/_core/sequence.rs @@ -0,0 +1,184 @@ +use std::{mem::ManuallyDrop, ops::Deref}; + +use super::traits::{FFIFromRust, FFIToRust}; + +#[repr(C)] +#[derive(Debug)] +pub struct FFISeq { + data: *mut T, + size: usize, + capacity: usize, +} + +impl FFISeq { + /// Extracts a slice. + pub fn as_slice(&self) -> &[T] { + self + } + + /// Returns the length of the sequence. + pub const fn len(&self) -> usize { + self.size + } + + /// Returns `true` if the sequence has a length of 0. + pub const fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +impl FFIToRust for FFISeq +where + T: FFIToRust, +{ + type Target = Vec; + + unsafe fn to_rust(&self) -> Self::Target { + self.iter().map(|v| v.to_rust()).collect() + } +} + +macro_rules! impl_traits_to_primitive { + ($type: ty) => { + impl FFIToRust for FFISeq<$type> { + type Target = Vec<$type>; + + unsafe fn to_rust(&self) -> Self::Target { + self.iter().cloned().collect() + } + } + }; +} + +impl_traits_to_primitive!(i8); +impl_traits_to_primitive!(i16); +impl_traits_to_primitive!(i32); +impl_traits_to_primitive!(i64); +impl_traits_to_primitive!(u8); +impl_traits_to_primitive!(u16); +impl_traits_to_primitive!(u32); +impl_traits_to_primitive!(u64); +impl_traits_to_primitive!(f32); +impl_traits_to_primitive!(f64); +impl_traits_to_primitive!(bool); + +impl Deref for FFISeq { + type Target = [T]; + + fn deref(&self) -> &[T] { + unsafe { std::slice::from_raw_parts(self.data, self.len()) } + } +} + +impl AsRef<[T]> for FFISeq { + fn as_ref(&self) -> &[T] { + self + } +} + +#[repr(C)] +#[derive(Debug)] +pub struct OwnedFFISeq { + data: *mut T, + size: usize, + capacity: usize, +} + +impl OwnedFFISeq { + /// Extracts a slice. + pub fn as_slice(&self) -> &[T] { + unsafe { std::slice::from_raw_parts(self.data, self.len()) } + } + + /// Returns the length of the sequence. + pub const fn len(&self) -> usize { + self.size + } + + /// Returns `true` if the sequence has a length of 0. 
+ pub const fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +impl FFIFromRust for OwnedFFISeq +where + T: FFIFromRust, +{ + type From = Vec; + + unsafe fn from_rust(vec: &Self::From) -> Self { + if vec.is_empty() { + Self { + data: std::ptr::null_mut(), + size: 0, + capacity: 0, + } + } else { + let mut new_vec = vec + .iter() + .map(|v| FFIFromRust::from_rust(v)) + .collect::>(); + new_vec.shrink_to_fit(); + assert_eq!(new_vec.len(), new_vec.capacity()); + let mut new_vec = ManuallyDrop::new(new_vec); + Self { + data: new_vec.as_mut_ptr(), + size: new_vec.len(), + capacity: new_vec.len(), + } + } + } +} + +impl Drop for OwnedFFISeq { + fn drop(&mut self) { + unsafe { Vec::from_raw_parts(self.data, self.size, self.capacity) }; + } +} + +/// Temporally borrowed buffer from `Vec` +#[repr(C)] +#[derive(Debug)] +pub struct RefFFISeq { + data: *mut T, + size: usize, + capacity: usize, +} + +impl RefFFISeq { + /// Extracts a slice. + pub fn as_slice(&self) -> &[T] { + unsafe { std::slice::from_raw_parts(self.data, self.len()) } + } + + /// Returns the length of the sequence. + pub const fn len(&self) -> usize { + self.size + } + + /// Returns `true` if the sequence has a length of 0. + pub const fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +impl FFIFromRust for RefFFISeq { + type From = Vec; + + unsafe fn from_rust(vec: &Self::From) -> Self { + if vec.is_empty() { + Self { + data: std::ptr::null_mut(), + size: 0, + capacity: 0, + } + } else { + Self { + data: vec.as_ptr() as *mut _, + size: vec.len(), + capacity: vec.len(), + } + } + } +} diff --git a/libraries/extensions/ros2-bridge/src/_core/string.rs b/libraries/extensions/ros2-bridge/src/_core/string.rs new file mode 100644 index 0000000000000000000000000000000000000000..0bf0de7b4c0be8eedebf00207a7719e1d9eb6ae3 --- /dev/null +++ b/libraries/extensions/ros2-bridge/src/_core/string.rs @@ -0,0 +1,253 @@ +use std::{ + ffi::{CStr, CString}, + ops::{Deref, DerefMut}, + os::raw::c_char, +}; + +use widestring::{U16CStr, U16CString}; + +use super::traits::{FFIFromRust, FFIToRust}; + +#[derive( + Debug, + Default, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + serde::Serialize, + serde::Deserialize, +)] +#[serde(from = "Vec", into = "Vec")] +#[repr(transparent)] +pub struct U16String(widestring::U16String); + +impl U16String { + pub fn new() -> Self { + Self(widestring::U16String::new()) + } + + #[allow(clippy::should_implement_trait)] + pub fn from_str(arg: &str) -> U16String { + Self(widestring::U16String::from_str(arg)) + } +} + +impl Deref for U16String { + type Target = widestring::U16String; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for U16String { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl AsRef for U16String { + fn as_ref(&self) -> &widestring::U16Str { + self.0.as_ref() + } +} + +impl From for Vec { + fn from(value: U16String) -> Self { + value.0.into_vec() + } +} + +impl From> for U16String { + fn from(value: Vec) -> Self { + Self(value.into()) + } +} + +/// An array of 8-bit characters terminated by a null character. +#[repr(C)] +#[derive(Debug)] +pub struct FFIString { + data: *mut c_char, + size: usize, + capacity: usize, +} + +impl FFIString { + /// Returns the length of the string (excluding the null byte) + pub const fn len(&self) -> usize { + self.size + } + + /// Returns `true` if the string has a length of 0. 
+ pub const fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub unsafe fn to_str(&self) -> Result<&str, std::str::Utf8Error> { + if self.is_empty() { + Ok("") + } else { + CStr::from_ptr(self.data).to_str() + } + } +} + +impl FFIToRust for FFIString { + type Target = String; + + unsafe fn to_rust(&self) -> Self::Target { + self.to_str().expect("CStr::to_str failed").to_string() + } +} + +#[repr(C)] +#[derive(Debug)] +pub struct OwnedFFIString { + data: *mut c_char, + size: usize, + capacity: usize, +} + +impl OwnedFFIString { + /// Returns the length of the string (excluding the null byte) + pub const fn len(&self) -> usize { + self.size + } + + /// Returns `true` if the string has a length of 0. + pub const fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +impl FFIFromRust for OwnedFFIString { + type From = String; + + unsafe fn from_rust(string: &Self::From) -> Self { + let cstring = CString::new(string.clone()).expect("CString::new failed"); + let len = cstring.as_bytes().len(); + Self { + data: cstring.into_raw(), + size: len, + capacity: len + 1, + } + } +} + +impl Drop for OwnedFFIString { + fn drop(&mut self) { + unsafe { + std::mem::drop(CString::from_raw(self.data)); + } + } +} + +/// An array of 16-bit characters terminated by a null character. +#[repr(C)] +#[derive(Debug)] +pub struct FFIWString { + data: *mut u16, + size: usize, + capacity: usize, +} + +impl FFIWString { + /// Returns the length of the string (excluding the null byte) + pub const fn len(&self) -> usize { + self.size + } + + /// Returns `true` if the sequence has a length of 0. + pub const fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +impl FFIToRust for FFIWString { + type Target = U16String; + + unsafe fn to_rust(&self) -> Self::Target { + if self.is_empty() { + Self::Target::new() + } else { + U16String(U16CStr::from_ptr_str(self.data).to_ustring()) + } + } +} + +#[repr(C)] +#[derive(Debug)] +pub struct OwnedFFIWString { + data: *mut u16, + size: usize, + capacity: usize, +} + +impl OwnedFFIWString { + /// Returns the length of the string (excluding the null byte) + pub const fn len(&self) -> usize { + self.size + } + + /// Returns `true` if the sequence has a length of 0. 
+ pub const fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +impl FFIFromRust for OwnedFFIWString { + type From = U16String; + + unsafe fn from_rust(string: &Self::From) -> Self { + let cstring = U16CString::from_ustr(string).expect("U16CString::new failed"); + let len = cstring.len(); + Self { + data: cstring.into_raw(), + size: len, + capacity: len + 1, + } + } +} + +impl Drop for OwnedFFIWString { + fn drop(&mut self) { + unsafe { + std::mem::drop(U16CString::from_raw(self.data)); + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn owned_ffi_string_new() { + let string = "abcde".into(); + let cstring = unsafe { OwnedFFIString::from_rust(&string) }; + let native_string = FFIString { + data: cstring.data, + size: cstring.size, + capacity: cstring.capacity, + }; + + assert_eq!(string, unsafe { native_string.to_rust() }); + } + + #[test] + fn owned_ffi_wstring_new() { + let wstring = U16String::from_str("あいうえお"); + let cwstring = unsafe { OwnedFFIWString::from_rust(&wstring) }; + let native_wstring = FFIWString { + data: cwstring.data, + size: cwstring.size, + capacity: cwstring.capacity, + }; + + assert_eq!(wstring, unsafe { native_wstring.to_rust() }); + } +} diff --git a/libraries/extensions/ros2-bridge/src/_core/traits.rs b/libraries/extensions/ros2-bridge/src/_core/traits.rs new file mode 100644 index 0000000000000000000000000000000000000000..709f34072f886ad6c6e63359bf3d5c4a53765212 --- /dev/null +++ b/libraries/extensions/ros2-bridge/src/_core/traits.rs @@ -0,0 +1,113 @@ +use std::convert::TryInto; + +use super::string::U16String; +use array_init::array_init; + +pub trait MessageT: Default + Send + Sync { + type Raw: FFIToRust + Send + Sync; + type RawRef: FFIFromRust; + + unsafe fn from_raw(from: &Self::Raw) -> Self { + from.to_rust() + } + + unsafe fn to_raw_ref(&self) -> Self::RawRef { + Self::RawRef::from_rust(self) + } +} + +pub trait ActionT: Send { + type Goal: MessageT; + type Result: MessageT; + type Feedback: MessageT; + type SendGoal; + type GetResult; + type FeedbackMessage: MessageT; +} + +// I was going to use `std::default::Default`, however generic arrays do not implement `std::default::Default`. +pub trait InternalDefault { + fn _default() -> Self; +} + +impl InternalDefault for Vec { + fn _default() -> Self { + Self::new() + } +} + +impl InternalDefault for [T; N] +where + T: InternalDefault + std::fmt::Debug, +{ + fn _default() -> Self { + array_init(|_| InternalDefault::_default()) + } +} + +macro_rules! 
impl_trait { + ($type: ty) => { + impl InternalDefault for $type { + fn _default() -> Self { + Self::default() + } + } + }; +} + +impl_trait!(i8); +impl_trait!(i16); +impl_trait!(i32); +impl_trait!(i64); +impl_trait!(u8); +impl_trait!(u16); +impl_trait!(u32); +impl_trait!(u64); +impl_trait!(f32); +impl_trait!(f64); +impl_trait!(bool); +impl_trait!(String); +impl_trait!(U16String); + +pub trait FFIToRust { + type Target; + + unsafe fn to_rust(&self) -> Self::Target; +} + +impl FFIToRust for [T; N] +where + T: FFIToRust, + T::Target: std::fmt::Debug, +{ + type Target = [T::Target; N]; + + unsafe fn to_rust(&self) -> ::Target { + self.iter() + .map(|v| v.to_rust()) + .collect::>() + .try_into() + .unwrap() + } +} + +pub trait FFIFromRust { + type From; + + unsafe fn from_rust(from: &Self::From) -> Self; +} + +impl FFIFromRust for [T; N] +where + T: FFIFromRust + std::fmt::Debug, +{ + type From = [T::From; N]; + + unsafe fn from_rust(from: &Self::From) -> Self { + from.iter() + .map(|v| FFIFromRust::from_rust(v)) + .collect::>() + .try_into() + .unwrap() + } +} diff --git a/libraries/extensions/ros2-bridge/src/lib.rs b/libraries/extensions/ros2-bridge/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..1909365fc30fb0ba29e166ab1c8ce0c6e3b82bdd --- /dev/null +++ b/libraries/extensions/ros2-bridge/src/lib.rs @@ -0,0 +1,15 @@ +#![allow(clippy::missing_safety_doc)] + +pub use flume; +pub use futures; +pub use futures_timer; +pub use ros2_client; +pub use rustdds; +pub use tracing; + +#[cfg(feature = "generate-messages")] +pub mod messages { + include!(env!("MESSAGES_PATH")); +} + +pub mod _core; diff --git a/libraries/extensions/telemetry/metrics/Cargo.toml b/libraries/extensions/telemetry/metrics/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..9a5b23991d82fb3ab3ebe3c02cf5206d151e5625 --- /dev/null +++ b/libraries/extensions/telemetry/metrics/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "dora-metrics" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +opentelemetry = { version = "0.22.0", features = ["metrics"] } +opentelemetry-otlp = { version = "0.15.0", features = ["tonic", "metrics"] } +opentelemetry_sdk = { version = "0.22.0", features = ["rt-tokio", "metrics"] } +eyre = "0.6.12" +opentelemetry-system-metrics = { version = "0.1.8" } diff --git a/libraries/extensions/telemetry/metrics/src/lib.rs b/libraries/extensions/telemetry/metrics/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..7d24632d3ca792287d050e7f884a50a4751dc3f1 --- /dev/null +++ b/libraries/extensions/telemetry/metrics/src/lib.rs @@ -0,0 +1,49 @@ +//! Enable system metric through opentelemetry exporter. +//! +//! This module fetch system information using [`sysinfo`] and +//! export those metrics via an [`opentelemetry-rust`] exporter with default configuration. +//! Observed metrics are: +//! - CPU usage. +//! - Memory and Virtual memory usage. +//! - disk usage (read and write). +//! +//! [`sysinfo`]: https://github.com/GuillaumeGomez/sysinfo +//! 
[`opentelemetry-rust`]: https://github.com/open-telemetry/opentelemetry-rust + +use std::time::Duration; + +use eyre::{Context, Result}; +use opentelemetry::metrics::{self, MeterProvider as _}; +use opentelemetry_otlp::{ExportConfig, WithExportConfig}; +use opentelemetry_sdk::{metrics::SdkMeterProvider, runtime}; +use opentelemetry_system_metrics::init_process_observer; +/// Init opentelemetry meter +/// +/// Use the default Opentelemetry exporter with default config +/// TODO: Make Opentelemetry configurable +/// +pub fn init_metrics() -> metrics::Result { + let endpoint = std::env::var("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT") + .unwrap_or_else(|_| "http://localhost:4317".to_string()); + let export_config = ExportConfig { + endpoint, + ..ExportConfig::default() + }; + + opentelemetry_otlp::new_pipeline() + .metrics(runtime::Tokio) + .with_exporter( + opentelemetry_otlp::new_exporter() + .tonic() + .with_export_config(export_config), + ) + .with_period(Duration::from_secs(10)) + .build() +} + +pub fn init_meter_provider(meter_id: String) -> Result { + let meter_provider = init_metrics().context("Could not create opentelemetry meter")?; + let meter = meter_provider.meter(meter_id); + init_process_observer(meter).context("could not initiale system metrics observer")?; + Ok(meter_provider) +} diff --git a/libraries/extensions/telemetry/tracing/Cargo.toml b/libraries/extensions/telemetry/tracing/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..4808b0776e4ba7358b8918643b7f6ec96efaaf15 --- /dev/null +++ b/libraries/extensions/telemetry/tracing/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "dora-tracing" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[features] + +[dependencies] +tracing-subscriber = { version = "0.3.15", features = ["env-filter"] } +tracing-opentelemetry = { version = "0.18.0" } +eyre = "0.6.8" +tracing = "0.1.36" +opentelemetry = { version = "0.18.0", features = ["rt-tokio", "metrics"] } +opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] } diff --git a/libraries/extensions/telemetry/tracing/src/lib.rs b/libraries/extensions/telemetry/tracing/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..b51b8eb44cee1a4c49098e9ed8925d3640bb90f1 --- /dev/null +++ b/libraries/extensions/telemetry/tracing/src/lib.rs @@ -0,0 +1,40 @@ +//! Enable tracing using Opentelemetry and Jaeger. +//! +//! This module init a tracing propagator for Rust code that requires tracing, and is +//! able to serialize and deserialize context that has been sent via the middleware. + +use eyre::Context as EyreContext; +use tracing::metadata::LevelFilter; +use tracing_subscriber::{ + filter::FilterExt, prelude::__tracing_subscriber_SubscriberExt, EnvFilter, Layer, +}; + +use eyre::ContextCompat; +use tracing_subscriber::Registry; +pub mod telemetry; + +pub fn set_up_tracing(name: &str) -> eyre::Result<()> { + // Filter log using `RUST_LOG`. More useful for CLI. 
+ let filter = EnvFilter::from_default_env().or(LevelFilter::WARN); + let stdout_log = tracing_subscriber::fmt::layer() + .pretty() + .with_filter(filter); + + let registry = Registry::default().with(stdout_log); + if let Some(endpoint) = std::env::var_os("DORA_JAEGER_TRACING") { + let endpoint = endpoint + .to_str() + .wrap_err("Could not parse env variable: DORA_JAEGER_TRACING")?; + let tracer = crate::telemetry::init_jaeger_tracing(name, endpoint) + .wrap_err("Could not instantiate tracing")?; + let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); + let subscriber = registry.with(telemetry); + tracing::subscriber::set_global_default(subscriber).context(format!( + "failed to set tracing global subscriber for {name}" + )) + } else { + tracing::subscriber::set_global_default(registry).context(format!( + "failed to set tracing global subscriber for {name}" + )) + } +} diff --git a/libraries/extensions/telemetry/tracing/src/telemetry.rs b/libraries/extensions/telemetry/tracing/src/telemetry.rs new file mode 100644 index 0000000000000000000000000000000000000000..526fe970bf88ed532d90fed2d14fd73a5af01483 --- /dev/null +++ b/libraries/extensions/telemetry/tracing/src/telemetry.rs @@ -0,0 +1,70 @@ +use opentelemetry::propagation::Extractor; +use opentelemetry::sdk::{propagation::TraceContextPropagator, trace as sdktrace}; +use opentelemetry::trace::TraceError; +use opentelemetry::{global, Context}; +use std::collections::HashMap; + +struct MetadataMap<'a>(HashMap<&'a str, &'a str>); + +impl<'a> Extractor for MetadataMap<'a> { + /// Get a value for a key from the MetadataMap. If the value can't be converted to &str, returns None + fn get(&self, key: &str) -> Option<&str> { + self.0.get(key).cloned() + } + + /// Collect all the keys from the MetadataMap. + fn keys(&self) -> Vec<&str> { + self.0.keys().cloned().collect() + } +} + +/// Init opentelemetry tracing +/// +/// Use the default exporter Jaeger as exporter with +/// - host: `172.17.0.1` which correspond to the docker address +/// - port: 6831 which is the default Jaeger port. 
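+/// - in this crate, `set_up_tracing` reads the endpoint to use from the
+///   `DORA_JAEGER_TRACING` environment variable and passes it in here.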
+/// +/// To launch the associated Jaeger docker container, launch the +/// following command: +/// ```bash +/// docker run -d -p 6831:6831/udp -p 6832:6832/udp -p 16686:16686 -p 14268:14268 jaegertracing/all-in-one:latest +/// ``` +/// +/// TODO: Make Jaeger configurable +/// +pub fn init_jaeger_tracing(name: &str, endpoint: &str) -> Result { + global::set_text_map_propagator(TraceContextPropagator::new()); + opentelemetry_jaeger::new_agent_pipeline() + .with_endpoint(endpoint) + .with_service_name(name) + .install_simple() +} + +pub fn serialize_context(context: &Context) -> String { + let mut map = HashMap::new(); + global::get_text_map_propagator(|propagator| propagator.inject_context(context, &mut map)); + let mut string_context = String::new(); + for (k, v) in map.iter() { + string_context.push_str(k); + string_context.push(':'); + string_context.push_str(v); + string_context.push(';'); + } + string_context +} + +pub fn deserialize_context(string_context: &str) -> Context { + let map = MetadataMap(deserialize_to_hashmap(string_context)); + global::get_text_map_propagator(|prop| prop.extract(&map)) +} + +pub fn deserialize_to_hashmap(string_context: &str) -> HashMap<&str, &str> { + let mut map = HashMap::new(); + for s in string_context.split(';') { + let mut values = s.split(':'); + let key = values.next().unwrap(); + let value = values.next().unwrap_or(""); + map.insert(key, value); + } + map +} diff --git a/libraries/message/Cargo.toml b/libraries/message/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..a9dd3c04b4bbde4d36ae7e98f18ac54e8c478552 --- /dev/null +++ b/libraries/message/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "dora-message" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +arrow-data = { workspace = true } +uhlc = "0.5.1" +serde = { version = "1.0.136", features = ["derive"] } +eyre = "0.6.8" +arrow-schema = { workspace = true, features = ["serde"] } diff --git a/libraries/message/src/lib.rs b/libraries/message/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..40c18f9926ff6038fa6c412eaa1ac68d2ce5d1d9 --- /dev/null +++ b/libraries/message/src/lib.rs @@ -0,0 +1,145 @@ +//! Enable serialisation and deserialisation of capnproto messages +//! 
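+//! The crate defines the `Metadata` and `ArrowTypeInfo` structures that describe
+//! Arrow arrays placed in shared-memory regions.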
+ +#![allow(clippy::missing_safety_doc)] + +use arrow_data::ArrayData; +use arrow_schema::DataType; +use eyre::Context; +use serde::{Deserialize, Serialize}; +pub use uhlc; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Metadata { + metadata_version: u16, + timestamp: uhlc::Timestamp, + pub type_info: ArrowTypeInfo, + pub parameters: MetadataParameters, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ArrowTypeInfo { + pub data_type: DataType, + pub len: usize, + pub null_count: usize, + pub validity: Option>, + pub offset: usize, + pub buffer_offsets: Vec, + pub child_data: Vec, +} + +impl ArrowTypeInfo { + pub const fn empty() -> Self { + Self { + data_type: DataType::Null, + len: 0, + null_count: 0, + validity: None, + offset: 0, + buffer_offsets: Vec::new(), + child_data: Vec::new(), + } + } + + pub fn byte_array(data_len: usize) -> Self { + Self { + data_type: DataType::UInt8, + len: data_len, + null_count: 0, + validity: None, + offset: 0, + buffer_offsets: vec![BufferOffset { + offset: 0, + len: data_len, + }], + child_data: Vec::new(), + } + } + + pub unsafe fn from_array( + array: &ArrayData, + region_start: *const u8, + region_len: usize, + ) -> eyre::Result { + Ok(Self { + data_type: array.data_type().clone(), + len: array.len(), + null_count: array.null_count(), + validity: array.nulls().map(|b| b.validity().to_owned()), + offset: array.offset(), + buffer_offsets: array + .buffers() + .iter() + .map(|b| { + let ptr = b.as_ptr(); + if ptr as usize <= region_start as usize { + eyre::bail!("ptr {ptr:p} starts before region {region_start:p}"); + } + if ptr as usize >= region_start as usize + region_len { + eyre::bail!("ptr {ptr:p} starts after region {region_start:p}"); + } + if ptr as usize + b.len() > region_start as usize + region_len { + eyre::bail!("ptr {ptr:p} ends after region {region_start:p}"); + } + let offset = usize::try_from(unsafe { ptr.offset_from(region_start) }) + .context("offset_from is negative")?; + + Result::<_, eyre::Report>::Ok(BufferOffset { + offset, + len: b.len(), + }) + }) + .collect::>()?, + child_data: array + .child_data() + .iter() + .map(|c| unsafe { Self::from_array(c, region_start, region_len) }) + .collect::>()?, + }) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct BufferOffset { + pub offset: usize, + pub len: usize, +} + +#[derive(Debug, Clone, PartialEq, Eq, Default, serde::Serialize, serde::Deserialize)] +pub struct MetadataParameters { + pub watermark: u64, + pub deadline: u64, + pub open_telemetry_context: String, +} + +impl MetadataParameters { + pub fn into_owned(self) -> MetadataParameters { + MetadataParameters { + open_telemetry_context: self.open_telemetry_context, + ..self + } + } +} + +impl Metadata { + pub fn new(timestamp: uhlc::Timestamp, type_info: ArrowTypeInfo) -> Self { + Self::from_parameters(timestamp, type_info, Default::default()) + } + + pub fn from_parameters( + timestamp: uhlc::Timestamp, + type_info: ArrowTypeInfo, + parameters: MetadataParameters, + ) -> Self { + Self { + metadata_version: 0, + timestamp, + parameters, + type_info, + } + } + + pub fn timestamp(&self) -> uhlc::Timestamp { + self.timestamp + } +} diff --git a/libraries/shared-memory-server/Cargo.toml b/libraries/shared-memory-server/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..fbca178cb82cc675cd9e9853d9a405f3a9f3afed --- /dev/null +++ b/libraries/shared-memory-server/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = 
"shared-memory-server" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +eyre = "0.6.8" +serde = { version = "1.0.152", features = ["derive"] } +shared_memory_extended = "0.13.0" +# TODO use upstream release once https://github.com/elast0ny/raw_sync-rs/pull/29 is merged +# Current fix, use personally pushed `raw_sync_2` version. +raw_sync_2 = "0.1.5" +bincode = "1.3.3" +tracing = "0.1.37" diff --git a/libraries/shared-memory-server/src/bin/bench.rs b/libraries/shared-memory-server/src/bin/bench.rs new file mode 100644 index 0000000000000000000000000000000000000000..1392b7e662211728adf217f8192f7adecea4c7c3 --- /dev/null +++ b/libraries/shared-memory-server/src/bin/bench.rs @@ -0,0 +1,109 @@ +use std::{ + process::Command, + time::{Duration, Instant}, +}; + +use eyre::{eyre, Context, ContextCompat}; +use shared_memory_server::{ShmemClient, ShmemConf, ShmemServer}; + +fn main() -> eyre::Result<()> { + let mut args = std::env::args(); + let executable = args.next().wrap_err("no arg 0")?; + let arg = args.next(); + + match arg.as_deref() { + Some("client") => client(args.next().wrap_err("no shmem id")?)?, + None => server(executable)?, + Some(other) => eyre::bail!("unexpected argument `{other}`"), + } + + Ok(()) +} + +fn server(executable: String) -> eyre::Result<()> { + let shmem = ShmemConf::new() + .size(4096) + .create() + .wrap_err("failed to create shmem region")?; + let shmem_id = shmem.get_os_id().to_owned(); + let mut server = unsafe { ShmemServer::new(shmem) }.wrap_err("failed to create ShmemServer")?; + + let mut client = Command::new(executable); + client.arg("client").arg(shmem_id); + let mut client_handle = client.spawn().wrap_err("failed to spawn client process")?; + + server_loop(&mut server).wrap_err("server loop failed")?; + + let status = client_handle + .wait() + .wrap_err("failed to wait for client process")?; + + if status.success() { + Ok(()) + } else { + Err(eyre!("client failed")) + } +} + +#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)] +enum Request { + Ping, +} + +#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)] +enum Reply { + Pong, +} + +fn server_loop(server: &mut ShmemServer) -> eyre::Result<()> { + while let Some(request) = server.listen().wrap_err("failed to receive next message")? 
{ + match request { + Request::Ping => server + .send_reply(&Reply::Pong) + .wrap_err("failed to send reply")?, + } + } + Ok(()) +} + +fn client(shmem_id: String) -> eyre::Result<()> { + let shmem = ShmemConf::new() + .os_id(shmem_id) + .open() + .wrap_err("failed to open shmem region")?; + let mut client = unsafe { ShmemClient::new(shmem, Some(Duration::from_secs(2))) } + .wrap_err("failed to create ShmemClient")?; + + client_loop(&mut client).wrap_err("client loop failed")?; + + Ok(()) +} + +fn client_loop(client: &mut ShmemClient) -> eyre::Result<()> { + let mut latencies = Vec::new(); + for _ in 0..10_000_000 { + let start = Instant::now(); + let reply = client.request(&Request::Ping).wrap_err("ping failed")?; + match reply { + Reply::Pong => { + latencies.push(start.elapsed()); + } + } + } + + let n = latencies.len(); + let avg_latency = latencies.iter().copied().sum::() / n as u32; + let min_latency = latencies.iter().min().unwrap(); + let max_latency = latencies.iter().max().unwrap(); + println!("average latency: {avg_latency:?} (min: {min_latency:?}, max: {max_latency:?})"); + + let mut longest: Vec<_> = latencies.iter().enumerate().map(|(i, d)| (d, i)).collect(); + longest.sort_unstable_by(|a, b| b.cmp(a)); + + println!("\nlongest iterations:"); + for (duration, index) in &longest[..10] { + println!(" {index}: {duration:?}") + } + + Ok(()) +} diff --git a/libraries/shared-memory-server/src/channel.rs b/libraries/shared-memory-server/src/channel.rs new file mode 100644 index 0000000000000000000000000000000000000000..3f63c3c9f738dd4d7319b52bf091521ec195e4d4 --- /dev/null +++ b/libraries/shared-memory-server/src/channel.rs @@ -0,0 +1,225 @@ +use eyre::{eyre, Context}; +use raw_sync_2::events::{Event, EventImpl, EventInit, EventState}; +use serde::{Deserialize, Serialize}; +use shared_memory_extended::Shmem; +use std::{ + mem, slice, + sync::atomic::{AtomicBool, AtomicU64}, + time::Duration, +}; + +pub struct ShmemChannel { + memory: Shmem, + server_event: Box, + client_event: Box, + disconnect_offset: usize, + len_offset: usize, + data_offset: usize, + server: bool, +} + +#[allow(clippy::missing_safety_doc)] +impl ShmemChannel { + pub unsafe fn new_server(memory: Shmem) -> eyre::Result { + let (server_event, server_event_len) = unsafe { Event::new(memory.as_ptr(), true) } + .map_err(|err| eyre!("failed to open raw server event: {err}"))?; + let (client_event, client_event_len) = + unsafe { Event::new(memory.as_ptr().wrapping_add(server_event_len), true) } + .map_err(|err| eyre!("failed to open raw client event: {err}"))?; + let (disconnect_offset, len_offset, data_offset) = + offsets(server_event_len, client_event_len); + + server_event + .set(EventState::Clear) + .map_err(|err| eyre!("failed to init server_event: {err}"))?; + client_event + .set(EventState::Clear) + .map_err(|err| eyre!("failed to init client_event: {err}"))?; + unsafe { + memory + .as_ptr() + .wrapping_add(disconnect_offset) + .cast::() + .write(AtomicBool::new(false)); + } + unsafe { + memory + .as_ptr() + .wrapping_add(len_offset) + .cast::() + .write(AtomicU64::new(0)); + } + + Ok(Self { + memory, + server_event, + client_event, + disconnect_offset, + len_offset, + data_offset, + server: true, + }) + } + + pub unsafe fn new_client(memory: Shmem) -> eyre::Result { + let (server_event, server_event_len) = unsafe { Event::from_existing(memory.as_ptr()) } + .map_err(|err| eyre!("failed to open raw server event: {err}"))?; + let (client_event, client_event_len) = + unsafe { 
Event::from_existing(memory.as_ptr().wrapping_add(server_event_len)) } + .map_err(|err| eyre!("failed to open raw client event: {err}"))?; + let (disconnect_offset, len_offset, data_offset) = + offsets(server_event_len, client_event_len); + + Ok(Self { + memory, + server_event, + client_event, + disconnect_offset, + len_offset, + data_offset, + server: false, + }) + } + + pub fn send<T>(&mut self, value: &T) -> eyre::Result<()> + where + T: Serialize + std::fmt::Debug, + { + let msg = bincode::serialize(value).wrap_err("failed to serialize value")?; + + self.send_raw(&msg) + } + + fn send_raw(&mut self, msg: &[u8]) -> Result<(), eyre::ErrReport> { + assert!(msg.len() <= self.memory.len() - self.data_offset); + // write data first + unsafe { + self.data_mut() + .copy_from_nonoverlapping(msg.as_ptr(), msg.len()); + } + // write len second for synchronization + self.data_len() + .store(msg.len() as u64, std::sync::atomic::Ordering::Release); + + // signal event + let event = if self.server { + &self.client_event + } else { + &self.server_event + }; + event + .set(EventState::Signaled) + .map_err(|err| eyre!("failed to send message over ShmemChannel: {err}"))?; + + let disconnected = self.disconnect().load(std::sync::atomic::Ordering::Acquire); + if disconnected { + eyre::bail!("server closed the connection"); + } + + Ok(()) + } + + pub fn receive<T>(&mut self, timeout: Option<Duration>) -> eyre::Result<Option<T>> + where + T: for<'a> Deserialize<'a> + std::fmt::Debug, + { + // wait for event + let event = if self.server { + &self.server_event + } else { + &self.client_event + }; + let timeout = timeout + .map(raw_sync_2::Timeout::Val) + .unwrap_or(raw_sync_2::Timeout::Infinite); + event + .wait(timeout) + .map_err(|err| eyre!("failed to receive from ShmemChannel: {err}"))?; + + // check for disconnect first + if self.disconnect().load(std::sync::atomic::Ordering::Acquire) { + if self.server { + tracing::trace!("shm client disconnected"); + } else { + tracing::error!("shm server disconnected"); + } + return Ok(None); + } + + // then read len for synchronization + let msg_len = self.data_len().load(std::sync::atomic::Ordering::Acquire) as usize; + assert_ne!(msg_len, 0); + assert!(msg_len < self.memory.len() - self.data_offset); + + // finally read the data + let value_raw = unsafe { slice::from_raw_parts(self.data(), msg_len) }; + + bincode::deserialize(value_raw) + .wrap_err("failed to deserialize value") + .map(|v| Some(v)) + } + + fn disconnect(&self) -> &AtomicBool { + unsafe { + &*self + .memory + .as_ptr() + .wrapping_add(self.disconnect_offset) + .cast::<AtomicBool>() + } + } + + fn data_len(&self) -> &AtomicU64 { + unsafe { + &*self + .memory + .as_ptr() + .wrapping_add(self.len_offset) + .cast::<AtomicU64>() + } + } + + fn data(&self) -> *const u8 { + self.memory.as_ptr().wrapping_add(self.data_offset) + } + + fn data_mut(&mut self) -> *mut u8 { + self.memory.as_ptr().wrapping_add(self.data_offset) + } +} + +/// Offsets of the disconnect flag (AtomicBool), the message length (AtomicU64), and the message data, which are laid out after the two events in the shared memory region. +fn offsets(server_event_len: usize, client_event_len: usize) -> (usize, usize, usize) { + let disconnect_offset = server_event_len + client_event_len; + let len_offset = disconnect_offset + mem::size_of::<AtomicBool>(); + let data_offset = len_offset + mem::size_of::<AtomicU64>(); + (disconnect_offset, len_offset, data_offset) +} + +unsafe impl Send for ShmemChannel {} + +impl Drop for ShmemChannel { + fn drop(&mut self) { + if self.server { + // server must only exit after client is disconnected + let disconnected = self.disconnect().load(std::sync::atomic::Ordering::Acquire); + if disconnected { + tracing::debug!("closing ShmemServer after client
disconnect"); + } else { + tracing::error!("ShmemServer closed before client disconnect"); + + self.disconnect() + .store(true, std::sync::atomic::Ordering::Release); + } + } else { + tracing::debug!("disconnecting client"); + + self.disconnect() + .store(true, std::sync::atomic::Ordering::Release); + + // wake up server + if let Err(err) = self.server_event.set(EventState::Signaled) { + tracing::warn!("failed to signal ShmemChannel disconnect: {err}"); + } + } + } +} diff --git a/libraries/shared-memory-server/src/lib.rs b/libraries/shared-memory-server/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..5404eb28a2f9e4d0bd66998a615f6319730e9239 --- /dev/null +++ b/libraries/shared-memory-server/src/lib.rs @@ -0,0 +1,79 @@ +#![allow(clippy::missing_safety_doc)] + +use self::channel::ShmemChannel; +use eyre::{eyre, Context}; +use serde::{Deserialize, Serialize}; +pub use shared_memory_extended::{Shmem, ShmemConf}; +use std::marker::PhantomData; +use std::time::Duration; + +mod channel; + +pub struct ShmemServer { + channel: ShmemChannel, + reply_expected: bool, + phantom: PhantomData<(T, U)>, +} + +impl ShmemServer { + pub unsafe fn new(memory: Shmem) -> eyre::Result { + Ok(Self { + channel: ShmemChannel::new_server(memory)?, + reply_expected: false, + phantom: PhantomData, + }) + } + + pub fn listen(&mut self) -> eyre::Result> + where + T: for<'a> Deserialize<'a> + std::fmt::Debug, + { + assert!(!self.reply_expected); + let result = self.channel.receive(None); + if matches!(result, Ok(Some(_))) { + self.reply_expected = true; + } + + result + } + + pub fn send_reply(&mut self, value: &U) -> eyre::Result<()> + where + U: Serialize + std::fmt::Debug, + { + assert!(self.reply_expected); + self.channel.send(value)?; + self.reply_expected = false; + Ok(()) + } +} + +pub struct ShmemClient { + channel: ShmemChannel, + timeout: Option, + phantom: PhantomData<(T, U)>, +} + +impl ShmemClient { + pub unsafe fn new(memory: Shmem, timeout: Option) -> eyre::Result { + Ok(Self { + channel: ShmemChannel::new_client(memory)?, + timeout, + phantom: PhantomData, + }) + } + + pub fn request(&mut self, value: &T) -> eyre::Result + where + T: Serialize + std::fmt::Debug, + U: for<'a> Deserialize<'a> + std::fmt::Debug, + { + self.channel + .send(value) + .wrap_err("failed to send request")?; + self.channel + .receive(self.timeout) + .wrap_err("failed to receive reply")? 
+ .ok_or_else(|| eyre!("server disconnected unexpectedly")) + } +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 0000000000000000000000000000000000000000..0a2102d4fb51e04080795dc9b4cb6d46b9a5b2d2 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,3 @@ +[toolchain] +channel = "1.76" +components = ["rustfmt", "clippy"] diff --git a/tool_nodes/dora-record/Cargo.toml b/tool_nodes/dora-record/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..246afbcf7b9cc6e6eee16550e3cafcfe91a19f59 --- /dev/null +++ b/tool_nodes/dora-record/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "dora-record" +version.workspace = true +edition = "2021" +documentation.workspace = true +description.workspace = true +license.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +tokio = { version = "1.36.0", features = ["fs", "rt", "rt-multi-thread"] } +dora-node-api = { workspace = true, features = ["tracing"] } +eyre = "0.6.8" +chrono = "0.4.31" +dora-tracing = { workspace = true } +parquet = { version = "52", features = ["async"] } diff --git a/tool_nodes/dora-record/src/main.rs b/tool_nodes/dora-record/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..3d9268fe718d4ff7658f80ff3e67548d98a0fd8f --- /dev/null +++ b/tool_nodes/dora-record/src/main.rs @@ -0,0 +1,173 @@ +use chrono::{DateTime, Utc}; +use dora_node_api::{ + self, + arrow::{ + array::{ + make_array, Array, ListArray, StringArray, TimestampMillisecondArray, UInt64Array, + }, + buffer::{OffsetBuffer, ScalarBuffer}, + datatypes::{DataType, Field, Schema}, + record_batch::RecordBatch, + }, + DoraNode, Event, Metadata, +}; +use dora_tracing::telemetry::deserialize_to_hashmap; +use eyre::{Context, ContextCompat}; +use parquet::{arrow::AsyncArrowWriter, basic::BrotliLevel, file::properties::WriterProperties}; +use std::{collections::HashMap, path::PathBuf, sync::Arc}; +use tokio::sync::mpsc; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + let (node, mut events) = DoraNode::init_from_env()?; + let dataflow_id = node.dataflow_id(); + let mut writers = HashMap::new(); + + while let Some(event) = events.recv() { + match event { + Event::Input { id, data, metadata } => { + match writers.get(&id) { + None => { + let field_uhlc = Field::new("timestamp_uhlc", DataType::UInt64, false); + let field_utc_epoch = Field::new( + "timestamp_utc", + DataType::Timestamp( + dora_node_api::arrow::datatypes::TimeUnit::Millisecond, + None, + ), + false, + ); + let field_trace_id = Field::new("trace_id", DataType::Utf8, true); + let field_span_id = Field::new("span_id", DataType::Utf8, true); + let field_values = + Arc::new(Field::new("item", data.data_type().clone(), true)); + let field_data = Field::new(id.clone(), DataType::List(field_values), true); + + let schema = Arc::new(Schema::new(vec![ + field_trace_id, + field_span_id, + field_uhlc, + field_utc_epoch, + field_data, + ])); + let dataflow_dir = PathBuf::from("out").join(dataflow_id.to_string()); + if !dataflow_dir.exists() { + std::fs::create_dir_all(&dataflow_dir) + .context("could not create dataflow_dir")?; + } + let file = + tokio::fs::File::create(dataflow_dir.join(format!("{id}.parquet"))) + .await + .context("Couldn't create write file")?; + let mut writer = AsyncArrowWriter::try_new( + file, + schema.clone(), + Some( + WriterProperties::builder() + .set_compression(parquet::basic::Compression::BROTLI( + BrotliLevel::default(), + )) + 
.build(), + ), + ) + .context("Could not create parquet writer")?; + let (tx, mut rx) = mpsc::channel(10); + + // One writer task per input + let join_handle = tokio::spawn(async move { + while let Some((data, metadata)) = rx.recv().await { + if let Err(e) = + write_event(&mut writer, data, &metadata, schema.clone()).await + { + println!("Error writing event data into parquet file: {:?}", e) + }; + } + writer.close().await + }); + tx.send((data.into(), metadata)) + .await + .context("Could not send event data into writer loop")?; + writers.insert(id, (tx, join_handle)); + } + Some((tx, _)) => { + tx.send((data.into(), metadata)) + .await + .context("Could not send event data into writer loop")?; + } + }; + } + Event::InputClosed { id } => match writers.remove(&id) { + None => {} + Some(tx) => drop(tx), + }, + _ => {} + } + } + + for (id, (tx, join_handle)) in writers { + drop(tx); + join_handle + .await + .context("Writer thread failed")? + .context(format!( + "Could not close the Parquet writer for {id}" + ))?; + } + + Ok(()) +} + +/// Write a single event as a row into the Parquet writer +async fn write_event( + writer: &mut AsyncArrowWriter<tokio::fs::File>, + data: Arc<dyn Array>, + metadata: &Metadata, + schema: Arc<Schema>, +) -> eyre::Result<()> { + let offsets = OffsetBuffer::new(ScalarBuffer::from(vec![0, data.len() as i32])); + let field = Arc::new(Field::new("item", data.data_type().clone(), true)); + let list = ListArray::new(field, offsets, data.clone(), None); + + let timestamp = metadata.timestamp(); + let timestamp_uhlc = UInt64Array::from(vec![timestamp.get_time().0]); + let timestamp_uhlc = make_array(timestamp_uhlc.into()); + let system_time = timestamp.get_time().to_system_time(); + + let dt: DateTime<Utc> = system_time.into(); + let timestamp_utc = TimestampMillisecondArray::from(vec![dt.timestamp_millis()]); + let timestamp_utc = make_array(timestamp_utc.into()); + + let string_otel_context = metadata.parameters.open_telemetry_context.to_string(); + let otel_context = deserialize_to_hashmap(&string_otel_context); + let traceparent = otel_context.get("traceparent"); + let trace_id = match traceparent { + None => "", + Some(trace) => trace.split('-').nth(1).context("Trace is malformed")?, + }; + let span_id = match traceparent { + None => "", + Some(trace) => trace.split('-').nth(2).context("Trace is malformed")?, + }; + let trace_id_array = StringArray::from(vec![trace_id]); + let trace_id_array = make_array(trace_id_array.into()); + let span_id_array = StringArray::from(vec![span_id]); + let span_id_array = make_array(span_id_array.into()); + + let record = RecordBatch::try_new( + schema, + vec![ + trace_id_array, + span_id_array, + timestamp_uhlc, + timestamp_utc, + make_array(list.into()), + ], + ) + .context("Could not create record batch with the given data")?; + writer + .write(&record) + .await + .context("Could not write record batch to file")?; + + Ok(()) +}
= ["rt"] } +rerun = { version = "0.15.1", features = ["web_viewer", "image"] } +ndarray = "0.15.6" diff --git a/tool_nodes/dora-rerun/README.md b/tool_nodes/dora-rerun/README.md new file mode 100644 index 0000000000000000000000000000000000000000..75973851adb342a25dbba96219d85b3b95b62aad --- /dev/null +++ b/tool_nodes/dora-rerun/README.md @@ -0,0 +1,39 @@ +# dora-rerun + +dora visualization using `rerun` + +This nodes is still experimental and format for passing Images, Bounding boxes, and text are probably going to change in the future. + +## Getting Started + +```bash +cargo install --force rerun-cli@0.15.1 + +## To install this package +git clone git@github.com:dora-rs/dora.git +cargo install --git https://github.com/dora-rs/dora dora-rerun +``` + +## Adding to existing graph: + +```yaml +- id: rerun + custom: + source: dora-rerun + inputs: + image: webcam/image + text: webcam/text + boxes2d: object_detection/bbox + envs: + IMAGE_WIDTH: 960 + IMAGE_HEIGHT: 540 + IMAGE_DEPTH: 3 + RERUN_MEMORY_LIMIT: 25% +``` + +## Configurations + +- IMAGE_WIDTH: Image width in pixels +- IMAGE_HEIGHT: Image height in heights +- IMAGE_DEPTH: Image depth +- RERUN_MEMORY_LIMIT: Rerun memory limit diff --git a/tool_nodes/dora-rerun/src/main.rs b/tool_nodes/dora-rerun/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..3bf8c2311c7f1789653a3ccd67565da080817f04 --- /dev/null +++ b/tool_nodes/dora-rerun/src/main.rs @@ -0,0 +1,132 @@ +//! Demonstrates the most barebone usage of the Rerun SDK. + +use std::env::VarError; + +use dora_node_api::{ + arrow::array::{Float32Array, StringArray, UInt8Array}, + DoraNode, Event, +}; +use eyre::{eyre, Context, Result}; +use rerun::{ + external::re_types::ArrowBuffer, SpawnOptions, TensorBuffer, TensorData, TensorDimension, +}; + +fn main() -> Result<()> { + // rerun `serve()` requires to have a running Tokio runtime in the current context. + let rt = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime"); + let _guard = rt.enter(); + + let (_node, mut events) = + DoraNode::init_from_env().context("Could not initialize dora node")?; + + // Limit memory usage + let mut options = SpawnOptions::default(); + + let memory_limit = match std::env::var("RERUN_MEMORY_LIMIT") { + Ok(memory_limit) => memory_limit + .parse::() + .context("Could not parse RERUN_MEMORY_LIMIT value")?, + Err(VarError::NotUnicode(_)) => { + return Err(eyre!("RERUN_MEMORY_LIMIT env variable is not unicode")); + } + Err(VarError::NotPresent) => "25%".to_string(), + }; + + options.memory_limit = memory_limit; + + let rec = rerun::RecordingStreamBuilder::new("dora-rerun") + .spawn_opts(&options, None) + .context("Could not spawn rerun visualization")?; + + while let Some(event) = events.recv() { + if let Event::Input { + id, + data, + metadata: _, + } = event + { + if id.as_str().contains("image") { + let shape = vec![ + TensorDimension { + name: Some("height".into()), + size: std::env::var(format!("{}_HEIGHT", id.as_str().to_uppercase())) + .context(format!( + "Could not read {}_HEIGHT env variable for parsing the image", + id.as_str().to_uppercase() + ))? + .parse() + .context(format!( + "Could not parse env {}_HEIGHT", + id.as_str().to_uppercase() + ))?, + }, + TensorDimension { + name: Some("width".into()), + size: std::env::var(format!("{}_WIDTH", id.as_str().to_uppercase())) + .context(format!( + "Could not read {}_WIDTH env variable for parsing the image", + id.as_str().to_uppercase() + ))? 
+ .parse() + .context(format!( + "Could not parse env {}_WIDTH", + id.as_str().to_uppercase() + ))?, + }, + TensorDimension { + name: Some("depth".into()), + size: std::env::var(format!("{}_DEPTH", id.as_str().to_uppercase())) + .context(format!( + "Could not read {}_DEPTH env variable for parsing the image", + id.as_str().to_uppercase() + ))? + .parse() + .context(format!( + "Could not parse env {}_DEPTH", + id.as_str().to_uppercase() + ))?, + }, + ]; + + let buffer: UInt8Array = data.to_data().into(); + let buffer: &[u8] = buffer.values(); + let buffer = TensorBuffer::U8(ArrowBuffer::from(buffer)); + let tensordata = TensorData::new(shape.clone(), buffer); + let image = rerun::Image::new(tensordata); + + rec.log(id.as_str(), &image) + .context("could not log image")?; + } else if id.as_str().contains("textlog") { + let buffer: StringArray = data.to_data().into(); + buffer.iter().try_for_each(|string| -> Result<()> { + if let Some(str) = string { + rec.log(id.as_str(), &rerun::TextLog::new(str)) + .wrap_err("Could not log text") + } else { + Ok(()) + } + })?; + } else if id.as_str().contains("boxes2d") { + let buffer: Float32Array = data.to_data().into(); + let buffer: &[f32] = buffer.values(); + let mut centers = vec![]; + let mut sizes = vec![]; + let mut classes = vec![]; + buffer.chunks(6).for_each(|block| { + if let [x, y, w, h, _conf, cls] = block { + centers.push((*x, *y)); + sizes.push((*w, *h)); + classes.push(*cls as u16); + } + }); + rec.log( + id.as_str(), + &rerun::Boxes2D::from_centers_and_sizes(centers, sizes).with_class_ids(classes), + ) + .wrap_err("Could not log Boxes2D")?; + } + } + } + + Ok(()) +}